/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_net.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#define ENIC_TX_OFFLOAD_MASK (			\
		PKT_TX_VLAN_PKT |		\
		PKT_TX_IP_CKSUM |		\
		PKT_TX_L4_MASK |		\
		PKT_TX_TCP_SEG)

#define ENIC_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK)
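
/*
 * Any ol_flags bit outside ENIC_TX_OFFLOAD_MASK is not supported by the
 * hardware; enic_prep_pkts() below rejects packets that carry one and
 * sets rte_errno to ENOTSUP.
 */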

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif
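
/* rte_prefetch0() pulls a line into every cache level, which suits
 * descriptors the PMD reads next; rte_prefetch1() stops at L2, which is
 * presumably enough for packet data the application touches later.
 */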

static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) &
		~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}

/* Lookup table to translate RX CQ flags to mbuf flags. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
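	/*
	 * The index is the descriptor's flags byte masked down (below) to
	 * the fragment (0x40), IPv4 (0x20), IPv6 (0x10), TCP (0x04) and
	 * UDP (0x02) bits, so only those combinations are populated.
	 */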
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		/* All others reserved */
	};

	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags];
}

static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags, pkt_flags = 0, vlan_tci;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	vlan_tci = enic_cq_rx_desc_vlan(cqrd);

	/* VLAN-stripped flag. The L2 packet type is updated here as well. */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	} else {
		if (vlan_tci != 0)
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		else
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	}
	mbuf->vlan_tci = vlan_tci;

	if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
		struct cq_enet_rq_clsf_desc *clsf_cqd;
		uint16_t filter_id;

		clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
		filter_id = clsf_cqd->filter_id;
		if (filter_id) {
			pkt_flags |= PKT_RX_FDIR;
			if (filter_id != ENIC_MAGIC_FILTER_ID) {
				mbuf->hash.fdir.hi = clsf_cqd->filter_id;
				pkt_flags |= PKT_RX_FDIR_ID;
			}
		}
	} else if (enic_cq_rx_desc_rss_type(cqrd)) {
		/* RSS flag */
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}
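
	/*
	 * Checksum flags: when the adapter sets CSUM_NOT_CALC it verified
	 * no checksums at all (e.g. non-IP traffic), so neither GOOD nor
	 * BAD is reported and the stack must verify in software.
	 */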
	if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
		if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
			uint32_t l4_flags;

			l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

			if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
				pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
			else if (mbuf->packet_type & RTE_PTYPE_L3_IPV4)
				pkt_flags |= PKT_RX_IP_CKSUM_BAD;

			if (l4_flags == RTE_PTYPE_L4_UDP ||
			    l4_flags == RTE_PTYPE_L4_TCP) {
				if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
					pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
				else
					pkt_flags |= PKT_RX_L4_CKSUM_BAD;
			}
		}
	}

	mbuf->ol_flags = pkt_flags;
}

/* Dummy receive function that replaces the actual receive function, so
 * that reconfiguration operations can proceed safely.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
		     __rte_unused struct rte_mbuf **rx_pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}

uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *sop_rq = rx_queue;
	struct vnic_rq *data_rq;
	struct vnic_rq *rq;
	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
	uint16_t cq_idx;
	uint16_t rq_idx;
	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
	uint16_t seg_length;
	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;

	data_rq = &enic->rq[sop_rq->data_queue_idx];
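
	/*
	 * Each enic Rx queue is a pair of hardware RQs sharing one CQ: the
	 * start-of-packet (SOP) RQ receives first segments and, when
	 * scatter Rx is enabled, the data RQ receives the rest.
	 */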

	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
		struct cq_desc cqd;
		uint8_t packet_error;
		uint16_t ciflags;

		/* Check for pkts available */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
			& CQ_DESC_COLOR_MASK;
		if (color == cq->last_color)
			break;
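
		/*
		 * Ownership is tracked with a color bit the adapter flips
		 * on every pass over the CQ ring: an entry is new only
		 * while its color differs from cq->last_color, which the
		 * driver toggles below each time cq_idx wraps.
		 */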

		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

		rq = &enic->rq[rq_num];
		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rq_idx];
		rq->mbuf_ring[rq_idx] = nmb;

		/* Increment cqd, rqd, mbuf_table index */
		cq_idx++;
		if (unlikely(cq_idx == cq->ring.desc_count)) {
			cq_idx = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);

		ciflags = enic_cq_rx_desc_ciflags(
			(struct cq_enet_rq_desc *)&cqd);

		/* Push descriptor for newly allocated mbuf */
		nmb->data_off = RTE_PKTMBUF_HEADROOM;
		/*
		 * Only the address needs to be refilled. The length_type of
		 * the descriptor is set during initialization
		 * (enic_alloc_rx_queue_mbufs) and does not change.
		 */
		rqd_ptr->address = rte_cpu_to_le_64(nmb->buf_iova +
						    RTE_PKTMBUF_HEADROOM);

		/* Fill in the rest of the mbuf */
		seg_length = enic_cq_rx_desc_n_bytes(&cqd);

		if (rq->is_sop) {
			first_seg = rxmb;
			first_seg->pkt_len = seg_length;
		} else {
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
					  + seg_length);
			first_seg->nb_segs++;
			last_seg->next = rxmb;
		}

		rxmb->port = enic->port_id;
		rxmb->data_len = seg_length;

		rq->rx_nb_hold++;

		if (!(enic_cq_rx_desc_eop(ciflags))) {
			last_seg = rxmb;
			continue;
		}

		/* cq rx flags are only valid if eop bit is set */
		first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
		enic_cq_rx_to_pkt_flags(&cqd, first_seg);

		if (unlikely(packet_error)) {
			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			continue;
		}

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = first_seg;
	}

	sop_rq->pkt_first_seg = first_seg;
	sop_rq->pkt_last_seg = last_seg;

	cq->to_clean = cq_idx;
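
	/*
	 * first_seg/last_seg were saved back into the SOP RQ above, so a
	 * multi-segment packet that straddles two bursts is stitched
	 * together on the next call.
	 */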

	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
	    sop_rq->rx_free_thresh) {
		if (data_rq->in_use) {
			data_rq->posted_index =
				enic_ring_add(data_rq->ring.desc_count,
					      data_rq->posted_index,
					      data_rq->rx_nb_hold);
			data_rq->rx_nb_hold = 0;
		}
		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
						     sop_rq->posted_index,
						     sop_rq->rx_nb_hold);
		sop_rq->rx_nb_hold = 0;
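
		/*
		 * Make the refilled descriptors visible before the adapter
		 * sees the new posted_index values; the data RQ doorbell
		 * is rung before the SOP RQ doorbell so a start-of-packet
		 * is never posted ahead of its data descriptors.
		 */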
		rte_mb();
		if (data_rq->in_use)
			iowrite32_relaxed(data_rq->posted_index,
					  &data_rq->ctrl->posted_index);
		rte_compiler_barrier();
		iowrite32_relaxed(sop_rq->posted_index,
				  &sop_rq->ctrl->posted_index);
	}

	return nb_rx;
}

static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct vnic_wq_buf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
				   + 1;
	tail_idx = wq->tail_idx;
	buf = &wq->bufs[tail_idx];
	pool = ((struct rte_mbuf *)buf->mb)->pool;
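
	/*
	 * Reap completed Tx mbufs in batches: mbufs from the same mempool
	 * are collected in free[] and returned with a single
	 * rte_mempool_put_bulk(); a change of pool flushes the batch.
	 */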
	for (i = 0; i < nb_to_free; i++) {
		buf = &wq->bufs[tail_idx];
		m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
		buf->mb = NULL;

		if (unlikely(m == NULL)) {
			tail_idx = enic_ring_incr(desc_count, tail_idx);
			continue;
		}

		if (likely(m->pool == pool)) {
			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			rte_mempool_put_bulk(pool, (void *)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}

		tail_idx = enic_ring_incr(desc_count, tail_idx);
	}

	if (nb_free > 0)
		rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}

unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;
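
	/*
	 * The adapter writes the WQ completion index into a host memory
	 * word (the cqmsg memzone) rather than posting individual CQ
	 * descriptors, so Tx cleanup only has to poll that word.
	 */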
	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}

	return 0;
}
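
/*
 * tx_pkt_prepare handler: rejects mbufs whose offload flags the NIC does
 * not support and uses rte_net_intel_cksum_prepare() to fill in the
 * pseudo-header checksums that the checksum/TSO engine expects.
 */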
uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	int32_t ret;
	uint16_t i;
	uint64_t ol_flags;
	struct rte_mbuf *m;

	for (i = 0; i != nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;
		if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			return i;
		}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}

uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	struct vnic_wq_buf *buf;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop;
	uint64_t bus_addr;
	uint8_t offload_mode;
	uint16_t header_len;
	uint64_t tso;
	rte_atomic64_t *tx_oversized;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
	tx_oversized = &enic->soft_stats.tx_oversized;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		nb_segs = tx_pkt->nb_segs;
		tso = ol_flags & PKT_TX_TCP_SEG;

		/* drop packet if it's too big to send */
		if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
			rte_pktmbuf_free(tx_pkt);
			rte_atomic64_inc(tx_oversized);
			continue;
		}

		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		mss = 0;
		vlan_id = tx_pkt->vlan_tci;
		vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_iova + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);
		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
		header_len = 0;

		if (tso) {
			header_len = tx_pkt->l2_len + tx_pkt->l3_len +
				     tx_pkt->l4_len;

			/* Drop if non-TCP packet or TSO seg size is too big */
			if (unlikely(header_len == 0 || ((tx_pkt->tso_segsz +
			    header_len) > ENIC_TX_MAX_PKT_SIZE))) {
				rte_pktmbuf_free(tx_pkt);
				rte_atomic64_inc(tx_oversized);
				continue;
			}

			offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
			mss = tx_pkt->tso_segsz;
		}
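
		/*
		 * In CSUM mode the descriptor's mss field carries the
		 * checksum-request bits (ENIC_CALC_IP_CKSUM,
		 * ENIC_CALC_TCP_UDP_CKSUM); a real MSS is written only in
		 * the TSO path above.
		 */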
		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* Nic uses just 1 bit for UDP and TCP */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}
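
		/*
		 * eop is passed to wq_enet_desc_enc() twice: once as the
		 * end-of-packet bit and once as cq_entry, so a completion
		 * is requested only for the last descriptor of a packet.
		 */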
		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
				 offload_mode, eop, eop, 0, vlan_tag_insert,
				 vlan_id, 0);

		*desc_p = desc_tmp;
		buf = &wq->bufs[head_idx];
		buf->mb = (void *)tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;

		if (!eop) {
			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
			     tx_pkt->next) {
				data_len = tx_pkt->data_len;

				if (tx_pkt->next == NULL)
					eop = 1;
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_iova
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, offload_mode, eop, eop,
						 0, vlan_tag_insert, vlan_id,
						 0);

				*desc_p = desc_tmp;
				buf = &wq->bufs[head_idx];
				buf->mb = (void *)tx_pkt;
				head_idx = enic_ring_incr(desc_count,
							  head_idx);
				wq_desc_avail--;
			}
		}
	}
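
	/* Flush the descriptor writes (rte_wmb) before ringing the
	 * doorbell so the adapter never reads a stale descriptor.
	 */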
post:
	rte_wmb();
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}