/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_net.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) &
		~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}

/* Lookup table to translate RX CQ flags to mbuf flags. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	/*
	 * Odd-numbered entries are for tunnel packets. All packet type info
	 * applies to the inner packet, and there is no info on the outer
	 * packet. The outer flags in these entries exist only to avoid
	 * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf
	 * fields again by the receive functions (see the "Wipe the outer
	 * types" code there).
	 *
	 * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE),
	 * set RTE_PTYPE_TUNNEL_GRENAT for all tunnel packets.
	 */
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x01] = RTE_PTYPE_UNKNOWN |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER,
		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_UDP,
		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_TCP,
		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_UDP,
		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_TCP,
		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		/* All others reserved */
	};
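
	/*
	 * Note: the flag bits masked below all sit above bit 0, so
	 * cqrd_flags is always even; adding tnl (0 or 1) therefore selects
	 * the non-tunnel (even) or tunnel (odd) variant of each entry.
	 */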
	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags + tnl];
}

static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags, pkt_flags = 0, vlan_tci;
	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	vlan_tci = enic_cq_rx_desc_vlan(cqrd);

	/* VLAN STRIPPED flag. The L2 packet type is updated here also. */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	} else {
		if (vlan_tci != 0) {
			pkt_flags |= PKT_RX_VLAN;
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		} else {
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
		}
	}
	mbuf->vlan_tci = vlan_tci;

	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
		struct cq_enet_rq_clsf_desc *clsf_cqd;
		uint16_t filter_id;
		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
		filter_id = clsf_cqd->filter_id;
		if (filter_id) {
			pkt_flags |= PKT_RX_FDIR;
			if (filter_id != ENIC_MAGIC_FILTER_ID) {
				mbuf->hash.fdir.hi = clsf_cqd->filter_id;
				pkt_flags |= PKT_RX_FDIR_ID;
			}
		}
	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
		/* RSS flag */
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
		if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
			uint32_t l4_flags;
			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

			/*
			 * When overlay offload is enabled, the NIC may
			 * set ipv4_csum_ok=1 even if the inner packet
			 * is IPv6. So, explicitly check for IPv4 before
			 * checking ipv4_csum_ok.
			 */
			if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
				if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
					pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_IP_CKSUM_BAD;
			}

			if (l4_flags == RTE_PTYPE_L4_UDP ||
			    l4_flags == RTE_PTYPE_L4_TCP) {
				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
	}

	mbuf->ol_flags = pkt_flags;
}

/* Dummy receive function, installed in place of the real one so that
 * reconfiguration operations can proceed safely.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
		     __rte_unused struct rte_mbuf **rx_pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}

uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *sop_rq = rx_queue;
	struct vnic_rq *data_rq;
	struct vnic_rq *rq;
	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
	uint16_t cq_idx;
	uint16_t rq_idx, max_rx;
	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
	uint8_t tnl;
	uint16_t seg_length;
	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
	color = cq->last_color;

	data_rq = &enic->rq[sop_rq->data_queue_idx];

	/* Receive until the end of the ring, at most. */
	max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx);
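
	/*
	 * The NIC marks each CQ descriptor it writes with the current
	 * color bit and flips the bit every time it wraps the ring, so an
	 * entry whose color still equals cq->last_color has not been
	 * written yet and ends the burst.
	 */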
	while (max_rx) {
		volatile struct rq_enet_desc *rqd_ptr;
		struct cq_desc cqd;
		uint8_t packet_error;
		uint16_t ciflags;

		max_rx--;

		/* Check for pkts available */
		if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
			break;

		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

		rq = &enic->rq[rq_num];
		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rq_idx];
		rq->mbuf_ring[rq_idx] = nmb;
		cq_idx++;

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);

		ciflags = enic_cq_rx_desc_ciflags(
			(struct cq_enet_rq_desc *)&cqd);

		/* Push descriptor for newly allocated mbuf */
		nmb->data_off = RTE_PKTMBUF_HEADROOM;
		/*
		 * Only the address needs to be refilled. length_type of the
		 * descriptor is set during initialization
		 * (enic_alloc_rx_queue_mbufs) and does not change.
		 */
		rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
						    RTE_PKTMBUF_HEADROOM);

		/* Fill in the rest of the mbuf */
		seg_length = enic_cq_rx_desc_n_bytes(&cqd);

		if (rq->is_sop) {
			first_seg = rxmb;
			first_seg->pkt_len = seg_length;
		} else {
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
					+ seg_length);
			first_seg->nb_segs++;
			last_seg->next = rxmb;
		}

		rxmb->port = enic->port_id;
		rxmb->data_len = seg_length;

		rq->rx_nb_hold++;

		if (!(enic_cq_rx_desc_eop(ciflags))) {
			last_seg = rxmb;
			continue;
		}

		/*
		 * When overlay offload is enabled, CQ.fcoe indicates the
		 * packet is tunnelled.
		 */
		tnl = enic->overlay_offload &&
			(ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
		/* cq rx flags are only valid if eop bit is set */
		first_seg->packet_type =
			enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
		enic_cq_rx_to_pkt_flags(&cqd, first_seg);

		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
		if (tnl) {
			first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
						    RTE_PTYPE_L4_MASK);
		}
		if (unlikely(packet_error)) {
			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			continue;
		}

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = first_seg;
	}
	if (unlikely(cq_idx == cq->ring.desc_count)) {
		cq_idx = 0;
		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
	}

	sop_rq->pkt_first_seg = first_seg;
	sop_rq->pkt_last_seg = last_seg;

	cq->to_clean = cq_idx;

	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
	    sop_rq->rx_free_thresh) {
		if (data_rq->in_use) {
			data_rq->posted_index =
				enic_ring_add(data_rq->ring.desc_count,
					      data_rq->posted_index,
					      data_rq->rx_nb_hold);
			data_rq->rx_nb_hold = 0;
		}
		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
						     sop_rq->posted_index,
						     sop_rq->rx_nb_hold);
		sop_rq->rx_nb_hold = 0;
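
		/*
		 * rte_mb() makes the refilled descriptors visible to the
		 * NIC before the doorbells are rung. The data RQ is posted
		 * first and the SOP RQ last (with a compiler barrier in
		 * between), presumably so the NIC never sees a
		 * start-of-packet buffer whose data buffers are not yet
		 * posted.
		 */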
		rte_mb();
		if (data_rq->in_use)
			iowrite32_relaxed(data_rq->posted_index,
					  &data_rq->ctrl->posted_index);
		rte_compiler_barrier();
		iowrite32_relaxed(sop_rq->posted_index,
				  &sop_rq->ctrl->posted_index);
	}

	return nb_rx;
}

uint16_t
enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct rte_mbuf *mb, **rx, **rxmb;
	uint16_t cq_idx, nb_rx, max_rx;
	struct cq_enet_rq_desc *cqd;
	struct rq_enet_desc *rqd;
	unsigned int port_id;
	struct vnic_cq *cq;
	struct vnic_rq *rq;
	struct enic *enic;
	uint8_t color;
	bool overlay;
	bool tnl;

	rq = rx_queue;
	enic = vnic_dev_priv(rq->vdev);
	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	cq_idx = cq->to_clean;

	/*
	 * Fill up the reserve of free mbufs. Below, we restock the receive
	 * ring with these mbufs to avoid allocation failures.
	 */
	if (rq->num_free_mbufs == 0) {
		if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
					 ENIC_RX_BURST_MAX))
			return 0;
		rq->num_free_mbufs = ENIC_RX_BURST_MAX;
	}

	/* Receive until the end of the ring, at most. */
	max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
	max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);

	cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
	color = cq->last_color;
	rxmb = rq->mbuf_ring + cq_idx;
	port_id = enic->port_id;
	overlay = enic->overlay_offload;

	rx = rx_pkts;
	while (max_rx) {
		max_rx--;
		if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
			break;
		if (unlikely(cqd->bytes_written_flags &
			     CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
			rte_pktmbuf_free(*rxmb++);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			cqd++;
			continue;
		}

		mb = *rxmb++;
		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
				    RTE_PKTMBUF_HEADROOM));
		mb->data_len = cqd->bytes_written_flags &
			CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
		mb->pkt_len = mb->data_len;
		mb->port = port_id;
		tnl = overlay && (cqd->completed_index_flags &
				  CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
		mb->packet_type =
			enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
						     tnl);
		enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
		if (tnl) {
			mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
					     RTE_PTYPE_L4_MASK);
		}
		cqd++;
		*rx++ = mb;
	}
	/* Number of descriptors visited */
	nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
	if (nb_rx == 0)
		return 0;
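	/*
	 * nb_rx also counts any truncated packets dropped above: they
	 * consumed a CQ descriptor and an mbuf, so they must be included
	 * in the refill below. Only the packets stored via *rx++ are
	 * returned to the caller (rx - rx_pkts).
	 */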
	rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
	rxmb = rq->mbuf_ring + cq_idx;
	cq_idx += nb_rx;
	rq->rx_nb_hold += nb_rx;
	if (unlikely(cq_idx == cq->ring.desc_count)) {
		cq_idx = 0;
		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
	}
	cq->to_clean = cq_idx;

	memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
	       sizeof(struct rte_mbuf *) * nb_rx);
	rq->num_free_mbufs -= nb_rx;
	while (nb_rx) {
		nb_rx--;
		mb = *rxmb++;
		mb->data_off = RTE_PKTMBUF_HEADROOM;
		rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
		rqd++;
	}
	if (rq->rx_nb_hold > rq->rx_free_thresh) {
		rq->posted_index = enic_ring_add(rq->ring.desc_count,
						 rq->posted_index,
						 rq->rx_nb_hold);
		rq->rx_nb_hold = 0;
		rte_mb();
		iowrite32_relaxed(rq->posted_index,
				  &rq->ctrl->posted_index);
	}

	return rx - rx_pkts;
}

static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct rte_mbuf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
		     + 1;
	tail_idx = wq->tail_idx;
	pool = wq->bufs[tail_idx]->pool;
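	/*
	 * Batch mbufs that come from the same mempool and return them with
	 * a single rte_mempool_put_bulk(); when an mbuf from a different
	 * pool shows up, flush the batch and start a new one.
	 */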
	for (i = 0; i < nb_to_free; i++) {
		buf = wq->bufs[tail_idx];
		m = rte_pktmbuf_prefree_seg(buf);
		if (unlikely(m == NULL)) {
			tail_idx = enic_ring_incr(desc_count, tail_idx);
			continue;
		}

		if (likely(m->pool == pool)) {
			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			rte_mempool_put_bulk(pool, (void **)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}
		tail_idx = enic_ring_incr(desc_count, tail_idx);
	}

	if (nb_free > 0)
		rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}

unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;
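
	/*
	 * Instead of reading a CQ, the WQ is set up so the NIC writes the
	 * latest completed index to a message area in host memory
	 * (cqmsg_rz); compare it with the last value seen to detect new
	 * completions.
	 */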
	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}
	return 0;
}

uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	int32_t ret;
	uint16_t i;
	uint64_t ol_flags;
	struct rte_mbuf *m;

	for (i = 0; i != nb_pkts; i++) {
		m = tx_pkts[i];
		if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
			rte_errno = EINVAL;
			return i;
		}
		ol_flags = m->ol_flags;
		if (ol_flags & wq->tx_offload_notsup_mask) {
			rte_errno = ENOTSUP;
			return i;
		}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
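		/*
		 * Fix up the packet for the checksum/TSO offloads the
		 * hardware expects (pseudo-header checksum precomputed,
		 * Intel-NIC style), using the generic helper.
		 */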
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}

uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop, cq;
	uint64_t bus_addr;
	uint8_t offload_mode;
	uint16_t header_len;
	uint64_t tso;
	rte_atomic64_t *tx_oversized;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
	tx_oversized = &enic->soft_stats.tx_oversized;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		nb_segs = tx_pkt->nb_segs;
		tso = ol_flags & PKT_TX_TCP_SEG;

		/* drop packet if it's too big to send */
		if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
			rte_pktmbuf_free(tx_pkt);
			rte_atomic64_inc(tx_oversized);
			continue;
		}

		/* Stop if the ring is out of descriptors for this packet */
		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		mss = 0;
		vlan_id = tx_pkt->vlan_tci;
		vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_iova + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);
		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
		header_len = 0;

		if (tso) {
			header_len = tx_pkt->l2_len + tx_pkt->l3_len +
				     tx_pkt->l4_len;

			/* Drop if non-TCP packet or TSO seg size is too big */
			if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
			    header_len) > ENIC_TX_MAX_PKT_SIZE))) {
				rte_pktmbuf_free(tx_pkt);
				rte_atomic64_inc(tx_oversized);
				continue;
			}

			offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
			mss = tx_pkt->tso_segsz;
			/* For tunnel, need the size of outer+inner headers */
			if (ol_flags & PKT_TX_TUNNEL_MASK) {
				header_len += tx_pkt->outer_l2_len +
					      tx_pkt->outer_l3_len;
			}
		}

		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* Nic uses just 1 bit for UDP and TCP */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}
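
		/*
		 * Request a completion (cq = 1) only once every
		 * ENIC_WQ_CQ_THRESH descriptors, to keep completion
		 * processing in enic_cleanup_wq() cheap.
		 */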
		wq->cq_pend++;
		cq = 0;
		if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
			cq = 1;
			wq->cq_pend = 0;
		}
		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
				 offload_mode, eop, cq, 0, vlan_tag_insert,
				 vlan_id, 0);

		*desc_p = desc_tmp;
		wq->bufs[head_idx] = tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;

		if (!eop) {
			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
			    tx_pkt->next) {
				data_len = tx_pkt->data_len;

				wq->cq_pend++;
				cq = 0;
				if (tx_pkt->next == NULL) {
					eop = 1;
					if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
						cq = 1;
						wq->cq_pend = 0;
					}
				}
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_iova
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, offload_mode, eop, cq,
						 0, vlan_tag_insert, vlan_id,
						 0);

				*desc_p = desc_tmp;
				wq->bufs[head_idx] = tx_pkt;
				head_idx = enic_ring_incr(desc_count,
							  head_idx);
				wq_desc_avail--;
			}
		}
	}
 post:
	rte_wmb();
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
 done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}

static void enqueue_simple_pkts(struct rte_mbuf **pkts,
				struct wq_enet_desc *desc,
				uint16_t n,
				struct enic *enic)
{
	struct rte_mbuf *p;

	while (n) {
		n--;
		p = *pkts++;
		desc->address = p->buf_iova + p->data_off;
		desc->length = p->pkt_len;
		/*
		 * The app should not send oversized packets;
		 * tx_pkt_prepare includes a check. But some apps ignore
		 * both the device max size and tx_pkt_prepare, and
		 * oversized packets cause WQ errors that end with the NIC
		 * disabling the whole WQ. So truncate such packets here.
		 */
		if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
			desc->length = ENIC_TX_MAX_PKT_SIZE;
			rte_atomic64_inc(&enic->soft_stats.tx_oversized);
		}
		desc++;
	}
}

uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts)
{
	unsigned int head_idx, desc_count;
	struct wq_enet_desc *desc;
	struct vnic_wq *wq;
	struct enic *enic;
	uint16_t rem, n;

	wq = (struct vnic_wq *)tx_queue;
	enic = vnic_dev_priv(wq->vdev);
	enic_cleanup_wq(enic, wq);
	/* Will enqueue this many packets in this call */
	nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail);
	if (nb_pkts == 0)
		return 0;

	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;

	/* Descriptors until the end of the ring */
	n = desc_count - head_idx;
	n = RTE_MIN(nb_pkts, n);

	/* Save mbuf pointers to free later */
	memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n);

	/* Enqueue until the ring end */
	rem = nb_pkts - n;
	desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx;
	enqueue_simple_pkts(tx_pkts, desc, n, enic);

	/* Wrap to the start of the ring */
	if (rem > 0) {
		tx_pkts += n;
		memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem);
		desc = (struct wq_enet_desc *)wq->ring.descs;
		enqueue_simple_pkts(tx_pkts, desc, rem, enic);
	}
	rte_wmb();

	/* Update head_idx and desc_avail */
	wq->ring.desc_avail -= nb_pkts;
	head_idx += nb_pkts;
	if (head_idx >= desc_count)
		head_idx -= desc_count;
	wq->head_idx = head_idx;
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);

	return nb_pkts;
}