/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_net.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

/* Find the error status in the completed descriptor */
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}

/* Lookup table to translate RX CQ flags to mbuf flags. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	/*
	 * Odd-numbered entries are for tunnel packets. All packet type info
	 * applies to the inner packet, and there is no info on the outer
	 * packet. The outer flags in these entries exist only to avoid
	 * changing enic_cq_rx_to_pkt_flags(). They are cleared from the mbuf
	 * before returning.
	 *
	 * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
	 * RTE_PTYPE_TUNNEL_GRENAT.
	 */
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x01] = RTE_PTYPE_UNKNOWN |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER,
		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_UDP,
		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_TCP,
		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_UDP,
		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_TCP,
		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
			 RTE_PTYPE_TUNNEL_GRENAT |
			 RTE_PTYPE_INNER_L2_ETHER |
			 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
			 RTE_PTYPE_INNER_L4_FRAG,
		/* All others reserved */
	};
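
	/*
	 * The masked flags below form the table index directly; tnl (0 or 1)
	 * selects the odd-numbered tunnel variant of each entry.
	 */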
	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags + tnl];
}

static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags, pkt_flags = 0, vlan_tci;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	vlan_tci = enic_cq_rx_desc_vlan(cqrd);

	/* VLAN STRIPPED flag. The L2 packet type is updated here too. */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	} else {
		if (vlan_tci != 0)
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		else
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	}
	mbuf->vlan_tci = vlan_tci;

	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
		struct cq_enet_rq_clsf_desc *clsf_cqd;
		uint16_t filter_id;

		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
		filter_id = clsf_cqd->filter_id;
		if (filter_id) {
			pkt_flags |= PKT_RX_FDIR;
			if (filter_id != ENIC_MAGIC_FILTER_ID) {
				mbuf->hash.fdir.hi = clsf_cqd->filter_id;
				pkt_flags |= PKT_RX_FDIR_ID;
			}
		}
	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
		/* RSS flag */
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
		if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
			uint32_t l4_flags;

			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

			/*
			 * When overlay offload is enabled, the NIC may
			 * set ipv4_csum_ok=1 if the inner packet is IPv6.
			 * So, explicitly check for IPv4 before checking
			 * ipv4_csum_ok.
			 */
			if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
				if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
					pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_IP_CKSUM_BAD;
			}

			if (l4_flags == RTE_PTYPE_L4_UDP ||
			    l4_flags == RTE_PTYPE_L4_TCP) {
				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
	}

	mbuf->ol_flags = pkt_flags;
}

/* Dummy receive function to replace the actual function in
 * order to do safe reconfiguration operations.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
		     __rte_unused struct rte_mbuf **rx_pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}

uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *sop_rq = rx_queue;
	struct vnic_rq *data_rq;
	struct vnic_rq *rq;
	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
	uint16_t cq_idx;
	uint16_t rq_idx, max_rx;
	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
	uint8_t tnl;
	uint16_t seg_length;
	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
	color = cq->last_color;
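
	/*
	 * In scatter Rx, an enic receive queue is a pair: a start-of-packet
	 * (sop) queue and a data queue holding the remaining segments.
	 */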
	data_rq = &enic->rq[sop_rq->data_queue_idx];

	/* Receive until the end of the ring, at most. */
	max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx);
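
	/*
	 * A CQ entry is new when its color bit differs from cq->last_color;
	 * the color flips each time the ring wraps, so stale entries from
	 * the previous pass fail the check below.
	 */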
	while (max_rx) {
		volatile struct rq_enet_desc *rqd_ptr;
		struct cq_desc cqd;
		uint8_t packet_error;
		uint16_t ciflags;

		max_rx--;

		/* Check for pkts available */
		if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
			break;

		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

		rq = &enic->rq[rq_num];
		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rq_idx];
		rq->mbuf_ring[rq_idx] = nmb;
		cq_idx++;

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);

		ciflags = enic_cq_rx_desc_ciflags(
			(struct cq_enet_rq_desc *)&cqd);

		/* Push descriptor for newly allocated mbuf */
		nmb->data_off = RTE_PKTMBUF_HEADROOM;
		/*
		 * Only the address needs to be refilled. length_type of the
		 * descriptor is set during initialization
		 * (enic_alloc_rx_queue_mbufs) and does not change.
		 */
		rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
						    RTE_PKTMBUF_HEADROOM);

		/* Fill in the rest of the mbuf */
		seg_length = enic_cq_rx_desc_n_bytes(&cqd);

		if (rq->is_sop) {
			first_seg = rxmb;
			first_seg->pkt_len = seg_length;
		} else {
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
							+ seg_length);
			first_seg->nb_segs++;
			last_seg->next = rxmb;
		}

		rxmb->port = enic->port_id;
		rxmb->data_len = seg_length;

		rq->rx_nb_hold++;

		if (!(enic_cq_rx_desc_eop(ciflags))) {
			last_seg = rxmb;
			continue;
		}

		/*
		 * When overlay offload is enabled, CQ.fcoe indicates the
		 * packet is tunnelled.
		 */
		tnl = enic->overlay_offload &&
			(ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
		/* cq rx flags are only valid if eop bit is set */
		first_seg->packet_type =
			enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
		enic_cq_rx_to_pkt_flags(&cqd, first_seg);

		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
		if (tnl) {
			first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
						    RTE_PTYPE_L4_MASK);
		}

		if (unlikely(packet_error)) {
			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			continue;
		}

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = first_seg;
	}
	if (unlikely(cq_idx == cq->ring.desc_count)) {
		cq_idx = 0;
		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
	}

	sop_rq->pkt_first_seg = first_seg;
	sop_rq->pkt_last_seg = last_seg;

	cq->to_clean = cq_idx;

	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
	    sop_rq->rx_free_thresh) {
		if (data_rq->in_use) {
			data_rq->posted_index =
				enic_ring_add(data_rq->ring.desc_count,
					      data_rq->posted_index,
					      data_rq->rx_nb_hold);
			data_rq->rx_nb_hold = 0;
		}
		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
						     sop_rq->posted_index,
						     sop_rq->rx_nb_hold);
		sop_rq->rx_nb_hold = 0;
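
		/*
		 * The barrier makes the refilled descriptors visible before
		 * the doorbell writes below; the data RQ is posted before the
		 * sop RQ so a sop entry never references unposted data.
		 */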
		rte_mb();
		if (data_rq->in_use)
			iowrite32_relaxed(data_rq->posted_index,
					  &data_rq->ctrl->posted_index);
		rte_compiler_barrier();
		iowrite32_relaxed(sop_rq->posted_index,
				  &sop_rq->ctrl->posted_index);
	}

	return nb_rx;
}

uint16_t
enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	struct rte_mbuf *mb, **rx, **rxmb;
	uint16_t cq_idx, nb_rx, max_rx;
	struct cq_enet_rq_desc *cqd;
	struct rq_enet_desc *rqd;
	unsigned int port_id;
	struct vnic_cq *cq;
	struct vnic_rq *rq;
	struct enic *enic;
	uint8_t color;
	bool overlay;
	bool tnl;

	rq = rx_queue;
	enic = vnic_dev_priv(rq->vdev);
	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	cq_idx = cq->to_clean;

	/*
	 * Fill up the reserve of free mbufs. Below, we restock the receive
	 * ring with these mbufs to avoid allocation failures.
	 */
	if (rq->num_free_mbufs == 0) {
		if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
					 ENIC_RX_BURST_MAX))
			return 0;
		rq->num_free_mbufs = ENIC_RX_BURST_MAX;
	}

	/* Receive until the end of the ring, at most. */
	max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
	max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);

	cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
	color = cq->last_color;
	rxmb = rq->mbuf_ring + cq_idx;
	port_id = enic->port_id;
	overlay = enic->overlay_offload;

	rx = rx_pkts;
	while (max_rx) {
		max_rx--;
		if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
			break;
		if (unlikely(cqd->bytes_written_flags &
			     CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
			rte_pktmbuf_free(*rxmb++);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			cqd++;
			continue;
		}

		mb = *rxmb++;
		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
				    RTE_PKTMBUF_HEADROOM));
		mb->data_len = cqd->bytes_written_flags &
			CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
		mb->pkt_len = mb->data_len;
		mb->port = port_id;
		tnl = overlay && (cqd->completed_index_flags &
				  CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
		mb->packet_type =
			enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
						     tnl);
		enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
		/* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
		if (tnl) {
			mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
					     RTE_PTYPE_L4_MASK);
		}
		cqd++;
		*rx++ = mb;
	}
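
	/*
	 * Below, every entry consumed above is refilled in bulk from the
	 * free-mbuf reserve: pointers are copied into mbuf_ring and only the
	 * DMA address in each RQ descriptor is rewritten.
	 */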
	/* Number of descriptors visited */
	nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
	if (nb_rx == 0)
		return 0;
	rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
	rxmb = rq->mbuf_ring + cq_idx;
	cq_idx += nb_rx;
	rq->rx_nb_hold += nb_rx;
	if (unlikely(cq_idx == cq->ring.desc_count)) {
		cq_idx = 0;
		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
	}
	cq->to_clean = cq_idx;

	memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
	       sizeof(struct rte_mbuf *) * nb_rx);
	rq->num_free_mbufs -= nb_rx;
	while (nb_rx) {
		nb_rx--;
		mb = *rxmb++;
		mb->data_off = RTE_PKTMBUF_HEADROOM;
		rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
		rqd++;
	}
	if (rq->rx_nb_hold > rq->rx_free_thresh) {
		rq->posted_index = enic_ring_add(rq->ring.desc_count,
						 rq->posted_index,
						 rq->rx_nb_hold);
		rq->rx_nb_hold = 0;
		rte_wmb();
		iowrite32_relaxed(rq->posted_index,
				  &rq->ctrl->posted_index);
	}

	return rx - rx_pkts;
}

static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct rte_mbuf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
				   + 1;
	tail_idx = wq->tail_idx;
	pool = wq->bufs[tail_idx]->pool;
	for (i = 0; i < nb_to_free; i++) {
		buf = wq->bufs[tail_idx];
		m = rte_pktmbuf_prefree_seg(buf);
		if (unlikely(m == NULL)) {
			tail_idx = enic_ring_incr(desc_count, tail_idx);
			continue;
		}
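
		/*
		 * Batch frees by mempool: keep collecting while segments come
		 * from the same pool and flush the batch when it changes.
		 */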
		if (likely(m->pool == pool)) {
			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			rte_mempool_put_bulk(pool, (void *)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}
		tail_idx = enic_ring_incr(desc_count, tail_idx);
	}

	if (nb_free > 0)
		rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}
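
/*
 * The adapter posts the most recently completed WQ descriptor index to
 * the cqmsg memzone, so Tx cleanup reads a single word instead of
 * walking completion descriptors.
 */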
unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;

	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}
	return 0;
}
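
/*
 * Tx prepare handler: rejects packets that are too large or request
 * unsupported offloads, and fixes up checksum fields via
 * rte_net_intel_cksum_prepare() before they reach enic_xmit_pkts().
 */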
uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	int32_t ret;
	uint16_t i;
	uint64_t ol_flags;
	struct rte_mbuf *m;

	for (i = 0; i != nb_pkts; i++) {
		m = tx_pkts[i];
		if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
			rte_errno = EINVAL;
			return i;
		}
		ol_flags = m->ol_flags;
		if (ol_flags & wq->tx_offload_notsup_mask) {
			rte_errno = ENOTSUP;
			return i;
		}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}

uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop, cq;
	uint64_t bus_addr;
	uint8_t offload_mode;
	uint16_t header_len;
	uint64_t tso;
	rte_atomic64_t *tx_oversized;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
	tx_oversized = &enic->soft_stats.tx_oversized;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		nb_segs = tx_pkt->nb_segs;
		tso = ol_flags & PKT_TX_TCP_SEG;

		/* drop packet if it's too big to send */
		if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
			rte_pktmbuf_free(tx_pkt);
			rte_atomic64_inc(tx_oversized);
			continue;
		}

		/* Stop enqueuing if the whole chain does not fit */
		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		mss = 0;
		vlan_id = tx_pkt->vlan_tci;
		vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_iova + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);
		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
		header_len = 0;

		if (tso) {
			header_len = tx_pkt->l2_len + tx_pkt->l3_len +
				     tx_pkt->l4_len;

			/* Drop if non-TCP packet or TSO seg size is too big */
			if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
			    header_len) > ENIC_TX_MAX_PKT_SIZE))) {
				rte_pktmbuf_free(tx_pkt);
				rte_atomic64_inc(tx_oversized);
				continue;
			}

			offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
			mss = tx_pkt->tso_segsz;
			/* For tunnel, need the size of outer+inner headers */
			if (ol_flags & PKT_TX_TUNNEL_MASK) {
				header_len += tx_pkt->outer_l2_len +
					      tx_pkt->outer_l3_len;
			}
		}

		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
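			/*
			 * Without TSO, the descriptor's mss field is not used
			 * for segment size, so it carries the checksum-request
			 * bits (ENIC_CALC_*) instead.
			 */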
			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* The NIC uses just 1 bit for UDP and TCP */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}
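
		/*
		 * Request a Tx completion only once every ENIC_WQ_CQ_THRESH
		 * descriptors to reduce completion-processing overhead.
		 */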
		wq->cq_pend++;
		cq = 0;
		if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
			cq = 1;
			wq->cq_pend = 0;
		}
		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
				 offload_mode, eop, cq, 0, vlan_tag_insert,
				 vlan_id, 0);

		*desc_p = desc_tmp;
		wq->bufs[head_idx] = tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;

		if (!eop) {
			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
			    tx_pkt->next) {
				data_len = tx_pkt->data_len;

				wq->cq_pend++;
				cq = 0;
				if (tx_pkt->next == NULL) {
					eop = 1;
					if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
						cq = 1;
						wq->cq_pend = 0;
					}
				}
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_iova
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, offload_mode, eop, cq,
						 0, vlan_tag_insert, vlan_id,
						 0);

				*desc_p = desc_tmp;
				wq->bufs[head_idx] = tx_pkt;
				head_idx = enic_ring_incr(desc_count,
							  head_idx);
				wq_desc_avail--;
			}
		}
	}
post:
	rte_wmb();
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}

static void enqueue_simple_pkts(struct rte_mbuf **pkts,
				struct wq_enet_desc *desc,
				uint16_t n,
				struct enic *enic)
{
	struct rte_mbuf *p;

	while (n) {
		n--;
		p = *pkts++;
		desc->address = p->buf_iova + p->data_off;
		desc->length = p->pkt_len;
		/*
		 * The app should not send oversized
		 * packets. tx_pkt_prepare includes a check as
		 * well. But some apps ignore the device max size and
		 * tx_pkt_prepare. Oversized packets cause WQ errors
		 * and the NIC ends up disabling the whole WQ. So
		 * truncate such packets here.
		 */
		if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
			desc->length = ENIC_TX_MAX_PKT_SIZE;
			rte_atomic64_inc(&enic->soft_stats.tx_oversized);
		}
		desc++;
	}
}
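
/*
 * Simplified single-segment Tx path used when no per-packet offloads
 * are needed: each descriptor gets only an address and a length, so
 * packets are enqueued in two straight passes around the ring wrap.
 */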
uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			       uint16_t nb_pkts)
{
	unsigned int head_idx, desc_count;
	struct wq_enet_desc *desc;
	struct vnic_wq *wq;
	struct enic *enic;
	unsigned int rem, n;

	wq = (struct vnic_wq *)tx_queue;
	enic = vnic_dev_priv(wq->vdev);
	enic_cleanup_wq(enic, wq);
	/* Will enqueue this many packets in this call */
	nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail);
	if (nb_pkts == 0)
		return 0;

	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;

	/* Descriptors until the end of the ring */
	n = desc_count - head_idx;
	n = RTE_MIN(nb_pkts, n);

	/* Save mbuf pointers to free later */
	memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n);

	/* Enqueue until the ring end */
	rem = nb_pkts - n;
	desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx;
	enqueue_simple_pkts(tx_pkts, desc, n, enic);

	/* Wrap to the start of the ring */
	if (rem > 0) {
		tx_pkts += n;
		memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem);
		desc = (struct wq_enet_desc *)wq->ring.descs;
		enqueue_simple_pkts(tx_pkts, desc, rem, enic);
	}

	/* Update head_idx and desc_avail */
	wq->ring.desc_avail -= nb_pkts;
	head_idx += nb_pkts;
	if (head_idx >= desc_count)
		head_idx -= desc_count;
	wq->head_idx = head_idx;
	rte_wmb();
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);

	return nb_pkts;
}