[dpdk.git] / drivers / net / enic / enic_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
        return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
        return le16_to_cpu(crd->bytes_written_flags) &
                           ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
        return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
                CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
        return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
                == CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
        return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
                CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
                CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
        return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
                CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
        return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
                CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
        return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
                CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
        return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
        return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        return le16_to_cpu(cqrd->bytes_written_flags) &
                CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}


static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint16_t bwflags;

        bwflags = enic_cq_rx_desc_bwflags(cqrd);
        if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
                return 1;
        return 0;
}

/* Lookup table to translate RX CQ flags to mbuf packet types. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint8_t cqrd_flags = cqrd->flags;
        static const uint32_t cq_type_table[128] __rte_cache_aligned = {
                [0x00] = RTE_PTYPE_UNKNOWN,
                [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
                [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
                [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
                [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
                [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
                [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
                [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
                [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
                [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
                [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
                [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
                [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
                /* All others reserved */
        };
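        /*
         * The masked flags byte is used directly as the table index: the
         * fragment, IPv4, IPv6, TCP and UDP flag bits select one of the
         * entries initialized above, and every other combination falls on
         * a zero entry, i.e. RTE_PTYPE_UNKNOWN.
         */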
        cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
                | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
                | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
        return cq_type_table[cqrd_flags];
}

static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint16_t bwflags, pkt_flags = 0, vlan_tci;
        bwflags = enic_cq_rx_desc_bwflags(cqrd);
        vlan_tci = enic_cq_rx_desc_vlan(cqrd);

        /* VLAN STRIPPED flag. The L2 packet type is also updated here. */
        if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
                pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
        } else {
                if (vlan_tci != 0)
                        mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
                else
                        mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
        }
        mbuf->vlan_tci = vlan_tci;

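        /*
         * Completions of type CLASSIFIER carry the ID of the filter that
         * matched the packet.  A non-zero ID sets PKT_RX_FDIR; any ID other
         * than ENIC_MAGIC_FILTER_ID is additionally reported to the
         * application in mbuf->hash.fdir.hi along with PKT_RX_FDIR_ID.
         */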
        if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
                struct cq_enet_rq_clsf_desc *clsf_cqd;
                uint16_t filter_id;
                clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
                filter_id = clsf_cqd->filter_id;
                if (filter_id) {
                        pkt_flags |= PKT_RX_FDIR;
                        if (filter_id != ENIC_MAGIC_FILTER_ID) {
                                mbuf->hash.fdir.hi = clsf_cqd->filter_id;
                                pkt_flags |= PKT_RX_FDIR_ID;
                        }
                }
        } else if (enic_cq_rx_desc_rss_type(cqrd)) {
                /* RSS flag */
                pkt_flags |= PKT_RX_RSS_HASH;
                mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
        }

        /* checksum flags */
        if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
                if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
                        uint32_t l4_flags;
                        l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

                        if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
                                pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
                        else
                                pkt_flags |= PKT_RX_IP_CKSUM_BAD;

                        if (l4_flags == RTE_PTYPE_L4_UDP ||
                            l4_flags == RTE_PTYPE_L4_TCP) {
                                if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
                                        pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
                                else
                                        pkt_flags |= PKT_RX_L4_CKSUM_BAD;
                        }
                }
        }

        mbuf->ol_flags = pkt_flags;
}

/*
 * Dummy receive function, installed in place of the real one so that
 * reconfiguration operations can be performed safely.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
                     __rte_unused struct rte_mbuf **rx_pkts,
                     __rte_unused uint16_t nb_pkts)
{
        return 0;
}

uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
               uint16_t nb_pkts)
{
        struct vnic_rq *sop_rq = rx_queue;
        struct vnic_rq *data_rq;
        struct vnic_rq *rq;
        struct enic *enic = vnic_dev_priv(sop_rq->vdev);
        uint16_t cq_idx;
        uint16_t rq_idx;
        uint16_t rq_num;
        struct rte_mbuf *nmb, *rxmb;
        uint16_t nb_rx = 0;
        struct vnic_cq *cq;
        volatile struct cq_desc *cqd_ptr;
        uint8_t color;
        uint16_t seg_length;
        struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
        struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

        cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
        cq_idx = cq->to_clean;          /* index of cqd, rqd, mbuf_table */
        cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;

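        /*
         * Receive is driven from the start-of-packet (sop) RQ; additional
         * segments of a scattered packet arrive on the associated data RQ.
         * Each completion names the ring it belongs to in its queue number
         * field, which is looked up again inside the loop below.
         */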
        data_rq = &enic->rq[sop_rq->data_queue_idx];

        while (nb_rx < nb_pkts) {
                volatile struct rq_enet_desc *rqd_ptr;
                struct cq_desc cqd;
                uint8_t packet_error;
                uint16_t ciflags;

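                /*
                 * The color bit written by the NIC in type_color toggles on
                 * every pass around the CQ ring, and cq->last_color is
                 * flipped below each time cq_idx wraps.  A descriptor whose
                 * color still equals last_color has not been written for
                 * this pass, i.e. there are no more completions to process.
                 */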
                /* Check for pkts available */
                color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
                        & CQ_DESC_COLOR_MASK;
                if (color == cq->last_color)
                        break;

                /* Get the cq descriptor and extract rq info from it */
                cqd = *cqd_ptr;
                rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
                rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

                rq = &enic->rq[rq_num];
                rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

                /* allocate a new mbuf */
                nmb = rte_mbuf_raw_alloc(rq->mp);
                if (nmb == NULL) {
                        rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
                        break;
                }

                /* A packet error means descriptor and data are untrusted */
                packet_error = enic_cq_rx_check_err(&cqd);

                /* Get the mbuf to return and replace with one just allocated */
                rxmb = rq->mbuf_ring[rq_idx];
                rq->mbuf_ring[rq_idx] = nmb;

                /* Increment cqd, rqd, mbuf_table index */
                cq_idx++;
                if (unlikely(cq_idx == cq->ring.desc_count)) {
                        cq_idx = 0;
                        cq->last_color = cq->last_color ? 0 : 1;
                }

                /* Prefetch next mbuf & desc while processing current one */
                cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
                rte_enic_prefetch(cqd_ptr);

                ciflags = enic_cq_rx_desc_ciflags(
                        (struct cq_enet_rq_desc *)&cqd);

                /* Push descriptor for newly allocated mbuf */
                nmb->data_off = RTE_PKTMBUF_HEADROOM;
                /*
                 * Only the address needs to be refilled. length_type of the
                 * descriptor is set during initialization
                 * (enic_alloc_rx_queue_mbufs) and does not change.
                 */
                rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
                                                    RTE_PKTMBUF_HEADROOM);

                /* Fill in the rest of the mbuf */
                seg_length = enic_cq_rx_desc_n_bytes(&cqd);

                if (rq->is_sop) {
                        first_seg = rxmb;
                        first_seg->pkt_len = seg_length;
                } else {
                        first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
                                                        + seg_length);
                        first_seg->nb_segs++;
                        last_seg->next = rxmb;
                }

                rxmb->port = enic->port_id;
                rxmb->data_len = seg_length;

                rq->rx_nb_hold++;

                if (!(enic_cq_rx_desc_eop(ciflags))) {
                        last_seg = rxmb;
                        continue;
                }

                /* cq rx flags are only valid if eop bit is set */
                first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
                enic_cq_rx_to_pkt_flags(&cqd, first_seg);

                if (unlikely(packet_error)) {
                        rte_pktmbuf_free(first_seg);
                        rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
                        continue;
                }

                /* prefetch mbuf data for caller */
                rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
                                    RTE_PKTMBUF_HEADROOM));

                /* store the mbuf address into the next entry of the array */
                rx_pkts[nb_rx++] = first_seg;
        }

        sop_rq->pkt_first_seg = first_seg;
        sop_rq->pkt_last_seg = last_seg;

        cq->to_clean = cq_idx;

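        /*
         * Newly refilled RX descriptors are handed back to the NIC in
         * batches: only when the number of held descriptors exceeds
         * rx_free_thresh are the posted_index registers updated.  The
         * memory barrier ensures the descriptor address writes above are
         * visible before the doorbell writes that post them to hardware.
         */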
        if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
            sop_rq->rx_free_thresh) {
                if (data_rq->in_use) {
                        data_rq->posted_index =
                                enic_ring_add(data_rq->ring.desc_count,
                                              data_rq->posted_index,
                                              data_rq->rx_nb_hold);
                        data_rq->rx_nb_hold = 0;
                }
                sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
                                                     sop_rq->posted_index,
                                                     sop_rq->rx_nb_hold);
                sop_rq->rx_nb_hold = 0;

                rte_mb();
                if (data_rq->in_use)
                        iowrite32_relaxed(data_rq->posted_index,
                                          &data_rq->ctrl->posted_index);
                rte_compiler_barrier();
                iowrite32_relaxed(sop_rq->posted_index,
                                  &sop_rq->ctrl->posted_index);
        }

        return nb_rx;
}

static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
        struct vnic_wq_buf *buf;
        struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
        unsigned int nb_to_free, nb_free = 0, i;
        struct rte_mempool *pool;
        unsigned int tail_idx;
        unsigned int desc_count = wq->ring.desc_count;

        nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
                                   + 1;
        tail_idx = wq->tail_idx;
        buf = &wq->bufs[tail_idx];
        pool = ((struct rte_mbuf *)buf->mb)->pool;
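        /*
         * Free transmitted mbufs in bulk: consecutive mbufs that come from
         * the same mempool are collected in free[] and returned with a
         * single rte_mempool_put_bulk() call; whenever an mbuf from a
         * different pool is met, the current batch is flushed first.
         */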
        for (i = 0; i < nb_to_free; i++) {
                buf = &wq->bufs[tail_idx];
                m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
                buf->mb = NULL;

                if (unlikely(m == NULL)) {
                        tail_idx = enic_ring_incr(desc_count, tail_idx);
                        continue;
                }

                if (likely(m->pool == pool)) {
                        RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
                        free[nb_free++] = m;
                } else {
                        rte_mempool_put_bulk(pool, (void *)free, nb_free);
                        free[0] = m;
                        nb_free = 1;
                        pool = m->pool;
                }
                tail_idx = enic_ring_incr(desc_count, tail_idx);
        }

        if (nb_free > 0)
                rte_mempool_put_bulk(pool, (void **)free, nb_free);

        wq->tail_idx = tail_idx;
        wq->ring.desc_avail += nb_to_free;
}

unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
        u16 completed_index;

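        /*
         * The hardware writes the index of the most recently completed WQ
         * descriptor to the cqmsg memzone, so TX completions are picked up
         * by reading that location rather than by walking CQ descriptors.
         */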
        completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

        if (wq->last_completed_index != completed_index) {
                enic_free_wq_bufs(wq, completed_index);
                wq->last_completed_index = completed_index;
        }
        return 0;
}

uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
        uint16_t index;
        unsigned int pkt_len, data_len;
        unsigned int nb_segs;
        struct rte_mbuf *tx_pkt;
        struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
        struct enic *enic = vnic_dev_priv(wq->vdev);
        unsigned short vlan_id;
        uint64_t ol_flags;
        uint64_t ol_flags_mask;
        unsigned int wq_desc_avail;
        int head_idx;
        struct vnic_wq_buf *buf;
        unsigned int desc_count;
        struct wq_enet_desc *descs, *desc_p, desc_tmp;
        uint16_t mss;
        uint8_t vlan_tag_insert;
        uint8_t eop;
        uint64_t bus_addr;
        uint8_t offload_mode;
        uint16_t header_len;
        uint64_t tso;
        rte_atomic64_t *tx_oversized;

        enic_cleanup_wq(enic, wq);
        wq_desc_avail = vnic_wq_desc_avail(wq);
        head_idx = wq->head_idx;
        desc_count = wq->ring.desc_count;
        ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
        tx_oversized = &enic->soft_stats.tx_oversized;

        nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

        for (index = 0; index < nb_pkts; index++) {
                tx_pkt = *tx_pkts++;
                pkt_len = tx_pkt->pkt_len;
                data_len = tx_pkt->data_len;
                ol_flags = tx_pkt->ol_flags;
                nb_segs = tx_pkt->nb_segs;
                tso = ol_flags & PKT_TX_TCP_SEG;

                /* drop packet if it's too big to send */
                if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
                        rte_pktmbuf_free(tx_pkt);
                        rte_atomic64_inc(tx_oversized);
                        continue;
                }

                if (nb_segs > wq_desc_avail) {
                        if (index > 0)
                                goto post;
                        goto done;
                }

                mss = 0;
                vlan_id = tx_pkt->vlan_tci;
                vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
                bus_addr = (dma_addr_t)
                           (tx_pkt->buf_iova + tx_pkt->data_off);

                descs = (struct wq_enet_desc *)wq->ring.descs;
                desc_p = descs + head_idx;

                eop = (data_len == pkt_len);
                offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
                header_len = 0;

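                /*
                 * For TSO the descriptor carries the total L2+L3+L4 header
                 * length and the MSS so that the hardware can segment the
                 * payload; packets whose segment size plus headers would
                 * exceed ENIC_TX_MAX_PKT_SIZE are dropped instead.
                 */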
                if (tso) {
                        header_len = tx_pkt->l2_len + tx_pkt->l3_len +
                                     tx_pkt->l4_len;

                        /* Drop if non-TCP packet or TSO seg size is too big */
                        if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
                            header_len) > ENIC_TX_MAX_PKT_SIZE))) {
                                rte_pktmbuf_free(tx_pkt);
                                rte_atomic64_inc(tx_oversized);
                                continue;
                        }

                        offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
                        mss = tx_pkt->tso_segsz;
                }

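                /*
                 * When the descriptor stays in WQ_ENET_OFFLOAD_MODE_CSUM
                 * mode (no TSO), the mss value passed to wq_enet_desc_enc()
                 * is reused to carry the checksum request bits
                 * (ENIC_CALC_IP_CKSUM / ENIC_CALC_TCP_UDP_CKSUM).
                 */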
                if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
                        if (ol_flags & PKT_TX_IP_CKSUM)
                                mss |= ENIC_CALC_IP_CKSUM;

                        /* NIC uses just 1 bit for UDP and TCP */
                        switch (ol_flags & PKT_TX_L4_MASK) {
                        case PKT_TX_TCP_CKSUM:
                        case PKT_TX_UDP_CKSUM:
                                mss |= ENIC_CALC_TCP_UDP_CKSUM;
                                break;
                        }
                }


                wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
                                 offload_mode, eop, eop, 0, vlan_tag_insert,
                                 vlan_id, 0);

                *desc_p = desc_tmp;
                buf = &wq->bufs[head_idx];
                buf->mb = (void *)tx_pkt;
                head_idx = enic_ring_incr(desc_count, head_idx);
                wq_desc_avail--;

                if (!eop) {
                        for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
                            tx_pkt->next) {
                                data_len = tx_pkt->data_len;

                                if (tx_pkt->next == NULL)
                                        eop = 1;
                                desc_p = descs + head_idx;
                                bus_addr = (dma_addr_t)(tx_pkt->buf_iova
                                           + tx_pkt->data_off);
                                wq_enet_desc_enc((struct wq_enet_desc *)
                                                 &desc_tmp, bus_addr, data_len,
                                                 mss, 0, offload_mode, eop, eop,
                                                 0, vlan_tag_insert, vlan_id,
                                                 0);

                                *desc_p = desc_tmp;
                                buf = &wq->bufs[head_idx];
                                buf->mb = (void *)tx_pkt;
                                head_idx = enic_ring_incr(desc_count, head_idx);
                                wq_desc_avail--;
                        }
                }
        }
 post:
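        /*
         * Doorbell: the write memory barrier makes all descriptor writes
         * above visible to the device before posted_index is updated to
         * tell the NIC that new descriptors are ready to transmit.
         */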
        rte_wmb();
        iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
 done:
        wq->ring.desc_avail = wq_desc_avail;
        wq->head_idx = head_idx;

        return index;
}