net/enic: support mbuf fast free offload
[dpdk.git] drivers/net/enic/enic_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_net.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

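/*
 * Accessors for the Rx completion queue (CQ) descriptor: completion-index
 * and bytes-written flag words, error/EOP bits, checksum status, RSS
 * type/hash, stripped VLAN tag, and number of bytes written.
 */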
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
        return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
        return le16_to_cpu(crd->bytes_written_flags) &
                           ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
        return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
                CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
        return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
                == CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
        return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
                CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
                CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
        return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
                CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
        return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
                CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
        return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
                CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
        return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
        return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        return le16_to_cpu(cqrd->bytes_written_flags) &
                CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

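/* Return 1 if the completion reports a truncated (errored) packet, else 0. */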
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint16_t bwflags;

        bwflags = enic_cq_rx_desc_bwflags(cqrd);
        if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
                return 1;
        return 0;
}

/* Use a lookup table to translate Rx CQ flags to an mbuf packet type. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint8_t cqrd_flags = cqrd->flags;
        /*
         * Odd-numbered entries are for tunnel packets. All packet type info
         * applies to the inner packet, and there is no info on the outer
         * packet. The outer flags in these entries exist only to avoid
         * changing enic_cq_rx_to_pkt_flags(). They are cleared from the mbuf
         * afterwards.
         *
         * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
         * RTE_PTYPE_TUNNEL_GRENAT.
         */
        static const uint32_t cq_type_table[128] __rte_cache_aligned = {
                [0x00] = RTE_PTYPE_UNKNOWN,
                [0x01] = RTE_PTYPE_UNKNOWN |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER,
                [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
                [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_NONFRAG,
                [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
                [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_UDP,
                [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
                [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_TCP,
                [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
                [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_FRAG,
                [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
                [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_FRAG,
                [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
                [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_FRAG,
                [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
                [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_NONFRAG,
                [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
                [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_UDP,
                [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
                [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_TCP,
                [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
                [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_FRAG,
                [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
                [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_FRAG,
                [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
                [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
                         RTE_PTYPE_TUNNEL_GRENAT |
                         RTE_PTYPE_INNER_L2_ETHER |
                         RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                         RTE_PTYPE_INNER_L4_FRAG,
                /* All others reserved */
        };
        cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
                | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
                | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
        return cq_type_table[cqrd_flags + tnl];
}

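/*
 * Convert completion descriptor info into mbuf offload flags and metadata:
 * VLAN stripping, flow director match, RSS hash, and IP/L4 checksum status.
 */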
static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
        struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
        uint16_t bwflags, pkt_flags = 0, vlan_tci;
        bwflags = enic_cq_rx_desc_bwflags(cqrd);
        vlan_tci = enic_cq_rx_desc_vlan(cqrd);

        /* VLAN STRIPPED flag. The L2 packet type is also updated here. */
        if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
                pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
        } else {
                if (vlan_tci != 0)
                        mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
                else
                        mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
        }
        mbuf->vlan_tci = vlan_tci;

        if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
                struct cq_enet_rq_clsf_desc *clsf_cqd;
                uint16_t filter_id;
                clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
                filter_id = clsf_cqd->filter_id;
                if (filter_id) {
                        pkt_flags |= PKT_RX_FDIR;
                        if (filter_id != ENIC_MAGIC_FILTER_ID) {
                                mbuf->hash.fdir.hi = clsf_cqd->filter_id;
                                pkt_flags |= PKT_RX_FDIR_ID;
                        }
                }
        } else if (enic_cq_rx_desc_rss_type(cqrd)) {
                /* RSS flag */
                pkt_flags |= PKT_RX_RSS_HASH;
                mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
        }

        /* checksum flags */
        if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
                if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
                        uint32_t l4_flags;
                        l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

                        /*
                         * When overlay offload is enabled, the NIC may
                         * set ipv4_csum_ok=1 if the inner packet is IPv6.
                         * So, explicitly check for IPv4 before checking
                         * ipv4_csum_ok.
                         */
                        if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
                                if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
                                        pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
                                else
                                        pkt_flags |= PKT_RX_IP_CKSUM_BAD;
                        }

                        if (l4_flags == RTE_PTYPE_L4_UDP ||
                            l4_flags == RTE_PTYPE_L4_TCP) {
                                if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
                                        pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
                                else
                                        pkt_flags |= PKT_RX_L4_CKSUM_BAD;
                        }
                }
        }

        mbuf->ol_flags = pkt_flags;
}

/* Dummy receive function installed in place of the actual receive function
 * so that reconfiguration operations can be performed safely.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
                     __rte_unused struct rte_mbuf **rx_pkts,
                     __rte_unused uint16_t nb_pkts)
{
        return 0;
}

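/*
 * Receive burst handler. Walks the completion queue for the start-of-packet
 * RQ (and, for scattered Rx, its companion data RQ), replaces each consumed
 * mbuf with a freshly allocated one, chains segments until EOP, fills in the
 * packet type and offload flags, and posts new buffers to the NIC once the
 * free threshold is reached.
 */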
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
               uint16_t nb_pkts)
{
        struct vnic_rq *sop_rq = rx_queue;
        struct vnic_rq *data_rq;
        struct vnic_rq *rq;
        struct enic *enic = vnic_dev_priv(sop_rq->vdev);
        uint16_t cq_idx;
        uint16_t rq_idx;
        uint16_t rq_num;
        struct rte_mbuf *nmb, *rxmb;
        uint16_t nb_rx = 0;
        struct vnic_cq *cq;
        volatile struct cq_desc *cqd_ptr;
        uint8_t color;
        uint8_t tnl;
        uint16_t seg_length;
        struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
        struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

        cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
        cq_idx = cq->to_clean;          /* index of cqd, rqd, mbuf_table */
        cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;

        data_rq = &enic->rq[sop_rq->data_queue_idx];

        while (nb_rx < nb_pkts) {
                volatile struct rq_enet_desc *rqd_ptr;
                struct cq_desc cqd;
                uint8_t packet_error;
                uint16_t ciflags;

                /* Check for packets available */
                color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
                        & CQ_DESC_COLOR_MASK;
                if (color == cq->last_color)
                        break;

                /* Get the cq descriptor and extract rq info from it */
                cqd = *cqd_ptr;
                rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
                rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

                rq = &enic->rq[rq_num];
                rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

                /* allocate a new mbuf */
                nmb = rte_mbuf_raw_alloc(rq->mp);
                if (nmb == NULL) {
                        rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
                        break;
                }

                /* A packet error means descriptor and data are untrusted */
                packet_error = enic_cq_rx_check_err(&cqd);

                /* Get the mbuf to return and replace with one just allocated */
                rxmb = rq->mbuf_ring[rq_idx];
                rq->mbuf_ring[rq_idx] = nmb;

                /* Increment cqd, rqd, mbuf_table index */
                cq_idx++;
                if (unlikely(cq_idx == cq->ring.desc_count)) {
                        cq_idx = 0;
                        cq->last_color = cq->last_color ? 0 : 1;
                }

                /* Prefetch next mbuf & desc while processing current one */
                cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
                rte_enic_prefetch(cqd_ptr);

                ciflags = enic_cq_rx_desc_ciflags(
                        (struct cq_enet_rq_desc *)&cqd);

                /* Push descriptor for newly allocated mbuf */
                nmb->data_off = RTE_PKTMBUF_HEADROOM;
                /*
                 * Only the address needs to be refilled. length_type of the
                 * descriptor is set during initialization
                 * (enic_alloc_rx_queue_mbufs) and does not change.
                 */
                rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
                                                    RTE_PKTMBUF_HEADROOM);

                /* Fill in the rest of the mbuf */
                seg_length = enic_cq_rx_desc_n_bytes(&cqd);

                if (rq->is_sop) {
                        first_seg = rxmb;
                        first_seg->pkt_len = seg_length;
                } else {
                        first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
                                                        + seg_length);
                        first_seg->nb_segs++;
                        last_seg->next = rxmb;
                }

                rxmb->port = enic->port_id;
                rxmb->data_len = seg_length;

                rq->rx_nb_hold++;

                if (!(enic_cq_rx_desc_eop(ciflags))) {
                        last_seg = rxmb;
                        continue;
                }

                /*
                 * When overlay offload is enabled, CQ.fcoe indicates the
                 * packet is tunnelled.
                 */
                tnl = enic->overlay_offload &&
                        (ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
                /* CQ Rx flags are only valid if the EOP bit is set */
                first_seg->packet_type =
                        enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
                enic_cq_rx_to_pkt_flags(&cqd, first_seg);
                /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
                if (tnl) {
                        first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
                                                    RTE_PTYPE_L4_MASK);
                }
                if (unlikely(packet_error)) {
                        rte_pktmbuf_free(first_seg);
                        rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
                        continue;
                }

                /* Prefetch mbuf data for the caller */
                rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
                                    RTE_PKTMBUF_HEADROOM));

                /* Store the mbuf address into the next entry of the array */
                rx_pkts[nb_rx++] = first_seg;
        }

        sop_rq->pkt_first_seg = first_seg;
        sop_rq->pkt_last_seg = last_seg;

        cq->to_clean = cq_idx;

        if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
            sop_rq->rx_free_thresh) {
                if (data_rq->in_use) {
                        data_rq->posted_index =
                                enic_ring_add(data_rq->ring.desc_count,
                                              data_rq->posted_index,
                                              data_rq->rx_nb_hold);
                        data_rq->rx_nb_hold = 0;
                }
                sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
                                                     sop_rq->posted_index,
                                                     sop_rq->rx_nb_hold);
                sop_rq->rx_nb_hold = 0;

                rte_mb();
                if (data_rq->in_use)
                        iowrite32_relaxed(data_rq->posted_index,
                                          &data_rq->ctrl->posted_index);
                rte_compiler_barrier();
                iowrite32_relaxed(sop_rq->posted_index,
                                  &sop_rq->ctrl->posted_index);
        }

        return nb_rx;
}

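/*
 * Free transmitted mbufs on the fast path (DEV_TX_OFFLOAD_MBUF_FAST_FREE).
 * This relies on the offload's contract: all mbufs on the queue come from
 * the same mempool and have a reference count of 1, so they can be returned
 * with bulk mempool puts and without rte_pktmbuf_prefree_seg().
 */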
static void enic_fast_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
        unsigned int desc_count, n, nb_to_free, tail_idx;
        struct rte_mempool *pool;
        struct rte_mbuf **m;

        desc_count = wq->ring.desc_count;
        nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
                                   + 1;
        tail_idx = wq->tail_idx;
        wq->tail_idx += nb_to_free;
        wq->ring.desc_avail += nb_to_free;
        if (wq->tail_idx >= desc_count)
                wq->tail_idx -= desc_count;
        /* First, free at most until the end of the ring */
        m = &wq->bufs[tail_idx];
        pool = (*m)->pool;
        n = RTE_MIN(nb_to_free, desc_count - tail_idx);
        rte_mempool_put_bulk(pool, (void **)m, n);
        n = nb_to_free - n;
        /* Then wrap around and free the rest */
        if (unlikely(n))
                rte_mempool_put_bulk(pool, (void **)wq->bufs, n);
}

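/*
 * Free transmitted mbufs on the regular path: run rte_pktmbuf_prefree_seg()
 * on each buffer and batch the mempool puts, flushing whenever the mempool
 * changes from one buffer to the next.
 */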
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
        struct rte_mbuf *buf;
        struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
        unsigned int nb_to_free, nb_free = 0, i;
        struct rte_mempool *pool;
        unsigned int tail_idx;
        unsigned int desc_count = wq->ring.desc_count;

        nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
                                   + 1;
        tail_idx = wq->tail_idx;
        pool = wq->bufs[tail_idx]->pool;
        for (i = 0; i < nb_to_free; i++) {
                buf = wq->bufs[tail_idx];
                m = rte_pktmbuf_prefree_seg(buf);
                if (unlikely(m == NULL)) {
                        tail_idx = enic_ring_incr(desc_count, tail_idx);
                        continue;
                }

                if (likely(m->pool == pool)) {
                        RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
                        free[nb_free++] = m;
                } else {
                        rte_mempool_put_bulk(pool, (void **)free, nb_free);
                        free[0] = m;
                        nb_free = 1;
                        pool = m->pool;
                }
                tail_idx = enic_ring_incr(desc_count, tail_idx);
        }

        if (nb_free > 0)
                rte_mempool_put_bulk(pool, (void **)free, nb_free);

        wq->tail_idx = tail_idx;
        wq->ring.desc_avail += nb_to_free;
}

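/*
 * Reclaim completed Tx descriptors. The last completed index is read from
 * the CQ message area (cqmsg_rz); buffers up to that index are freed using
 * the fast or regular path, depending on the queue's offload configuration.
 */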
unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
        u16 completed_index;

        completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

        if (wq->last_completed_index != completed_index) {
                if (wq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                        enic_fast_free_wq_bufs(wq, completed_index);
                else
                        enic_free_wq_bufs(wq, completed_index);
                wq->last_completed_index = completed_index;
        }
        return 0;
}

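/*
 * Tx prepare handler: reject packets requesting offloads the queue does not
 * support and apply the pseudo-header checksum fixups needed for the
 * hardware checksum/TSO offloads before packets reach enic_xmit_pkts().
 */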
uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts)
{
        struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
        int32_t ret;
        uint16_t i;
        uint64_t ol_flags;
        struct rte_mbuf *m;

        for (i = 0; i != nb_pkts; i++) {
                m = tx_pkts[i];
                ol_flags = m->ol_flags;
                if (ol_flags & wq->tx_offload_notsup_mask) {
                        rte_errno = ENOTSUP;
                        return i;
                }
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
                        /* rte_errno holds positive error codes */
                        rte_errno = -ret;
                        return i;
                }
#endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
                        rte_errno = -ret;
                        return i;
                }
        }

        return i;
}

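/*
 * Transmit burst handler: reclaim completed descriptors, then encode one WQ
 * descriptor per mbuf segment (with VLAN insert, checksum, or TSO offload
 * info taken from ol_flags) and post the new head index to the NIC doorbell.
 */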
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
        uint16_t index;
        unsigned int pkt_len, data_len;
        unsigned int nb_segs;
        struct rte_mbuf *tx_pkt;
        struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
        struct enic *enic = vnic_dev_priv(wq->vdev);
        unsigned short vlan_id;
        uint64_t ol_flags;
        uint64_t ol_flags_mask;
        unsigned int wq_desc_avail;
        int head_idx;
        unsigned int desc_count;
        struct wq_enet_desc *descs, *desc_p, desc_tmp;
        uint16_t mss;
        uint8_t vlan_tag_insert;
        uint8_t eop;
        uint64_t bus_addr;
        uint8_t offload_mode;
        uint16_t header_len;
        uint64_t tso;
        rte_atomic64_t *tx_oversized;

        enic_cleanup_wq(enic, wq);
        wq_desc_avail = vnic_wq_desc_avail(wq);
        head_idx = wq->head_idx;
        desc_count = wq->ring.desc_count;
        ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
        tx_oversized = &enic->soft_stats.tx_oversized;

        nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

        for (index = 0; index < nb_pkts; index++) {
                tx_pkt = *tx_pkts++;
                pkt_len = tx_pkt->pkt_len;
                data_len = tx_pkt->data_len;
                ol_flags = tx_pkt->ol_flags;
                nb_segs = tx_pkt->nb_segs;
                tso = ol_flags & PKT_TX_TCP_SEG;

                /* Drop the packet if it is too big to send */
                if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
                        rte_pktmbuf_free(tx_pkt);
                        rte_atomic64_inc(tx_oversized);
                        continue;
                }

                if (nb_segs > wq_desc_avail) {
                        if (index > 0)
                                goto post;
                        goto done;
                }

                mss = 0;
                vlan_id = tx_pkt->vlan_tci;
                vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
                bus_addr = (dma_addr_t)
                           (tx_pkt->buf_iova + tx_pkt->data_off);

                descs = (struct wq_enet_desc *)wq->ring.descs;
                desc_p = descs + head_idx;

                eop = (data_len == pkt_len);
                offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
                header_len = 0;

                if (tso) {
                        header_len = tx_pkt->l2_len + tx_pkt->l3_len +
                                     tx_pkt->l4_len;

                        /* Drop if non-TCP packet or TSO seg size is too big */
                        if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
                            header_len) > ENIC_TX_MAX_PKT_SIZE))) {
                                rte_pktmbuf_free(tx_pkt);
                                rte_atomic64_inc(tx_oversized);
                                continue;
                        }

                        offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
                        mss = tx_pkt->tso_segsz;
                        /* For tunnel, need the size of outer+inner headers */
                        if (ol_flags & PKT_TX_TUNNEL_MASK) {
                                header_len += tx_pkt->outer_l2_len +
                                        tx_pkt->outer_l3_len;
                        }
                }

                if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
                        if (ol_flags & PKT_TX_IP_CKSUM)
                                mss |= ENIC_CALC_IP_CKSUM;

                        /* The NIC uses just 1 bit for UDP and TCP */
                        switch (ol_flags & PKT_TX_L4_MASK) {
                        case PKT_TX_TCP_CKSUM:
                        case PKT_TX_UDP_CKSUM:
                                mss |= ENIC_CALC_TCP_UDP_CKSUM;
                                break;
                        }
                }

                wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
                                 offload_mode, eop, eop, 0, vlan_tag_insert,
                                 vlan_id, 0);

                *desc_p = desc_tmp;
                wq->bufs[head_idx] = tx_pkt;
                head_idx = enic_ring_incr(desc_count, head_idx);
                wq_desc_avail--;

                if (!eop) {
                        for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
                            tx_pkt->next) {
                                data_len = tx_pkt->data_len;

                                if (tx_pkt->next == NULL)
                                        eop = 1;
                                desc_p = descs + head_idx;
                                bus_addr = (dma_addr_t)(tx_pkt->buf_iova
                                           + tx_pkt->data_off);
                                wq_enet_desc_enc((struct wq_enet_desc *)
                                                 &desc_tmp, bus_addr, data_len,
                                                 mss, 0, offload_mode, eop, eop,
                                                 0, vlan_tag_insert, vlan_id,
                                                 0);

                                *desc_p = desc_tmp;
                                wq->bufs[head_idx] = tx_pkt;
                                head_idx = enic_ring_incr(desc_count, head_idx);
                                wq_desc_avail--;
                        }
                }
        }
 post:
        rte_wmb();
        iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
 done:
        wq->ring.desc_avail = wq_desc_avail;
        wq->head_idx = head_idx;

        return index;
}