diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 532de940c6..eab943dcf2 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
  *
  */
@@ -25,18 +25,24 @@
 #include "dpaa2_ethdev.h"
 #include "base/dpaa2_hw_dpni_annot.h"
 
+static inline uint32_t __attribute__((hot))
+dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
+			struct dpaa2_annot_hdr *annotation);
+
 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
 	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
 	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
 	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
 	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
-	DPAA2_SET_FD_ASAL(_fd, DPAA2_ASAL_VAL); \
+	DPAA2_SET_FD_FRC(_fd, 0); \
+	DPAA2_RESET_FD_CTRL(_fd); \
+	DPAA2_RESET_FD_FLC(_fd); \
 } while (0)
 
 static inline void __attribute__((hot))
-dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
+dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd)
 {
-	DPAA2_PMD_DP_DEBUG("frc = 0x%x\t", frc);
+	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
 
 	m->packet_type = RTE_PTYPE_UNKNOWN;
 	switch (frc) {
@@ -91,29 +97,45 @@ dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
 		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
 			RTE_PTYPE_L4_ICMP;
 		break;
-	case DPAA2_PKT_TYPE_VLAN_1:
-	case DPAA2_PKT_TYPE_VLAN_2:
-		m->ol_flags |= PKT_RX_VLAN;
-		break;
-	/* More switch cases can be added */
-	/* TODO: Add handling for checksum error check from FRC */
	default:
-		m->packet_type = RTE_PTYPE_UNKNOWN;
+		m->packet_type = dpaa2_dev_rx_parse_slow(m,
+			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+			+ DPAA2_FD_PTA_SIZE));
 	}
+	m->hash.rss = fd->simple.flc_hi;
+	m->ol_flags |= PKT_RX_RSS_HASH;
 }
 
 static inline uint32_t __attribute__((hot))
-dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation)
+dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
+			struct dpaa2_annot_hdr *annotation)
 {
 	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
+	uint16_t *vlan_tci;
+
+	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
+			"(4)=0x%" PRIx64 "\t",
+			annotation->word3, annotation->word4);
+
+	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
+		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
+			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
+		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
+		mbuf->ol_flags |= PKT_RX_VLAN;
+		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
+	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
+		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
+			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
+		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
+		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
+		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
+	}
 
-	DPAA2_PMD_DP_DEBUG("(slow parse) Annotation = 0x%" PRIx64 "\t",
-			   annotation->word4);
 	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
-		pkt_type = RTE_PTYPE_L2_ETHER_ARP;
+		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
 		goto parse_done;
 	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
-		pkt_type = RTE_PTYPE_L2_ETHER;
+		pkt_type |= RTE_PTYPE_L2_ETHER;
 	} else {
 		goto parse_done;
 	}
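
The hunks above replace the FRC-only parser with a two-level scheme: on LX2160A the parse summary that the hardware leaves in the FD's FRC field is matched first, and any value it does not recognize falls through to the annotation-based slow parser, which now also recovers the VLAN TCI from the frame data. A minimal sketch of that dispatch, with an illustrative FRC constant and a stub standing in for dpaa2_dev_rx_parse_slow():

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

/* EXAMPLE_FRC_IPV4_UDP is illustrative only -- the driver switches on
 * DPAA2_PKT_TYPE_* codes from its annotation header. */
#define EXAMPLE_FRC_IPV4_UDP 0x01

/* Stand-in for dpaa2_dev_rx_parse_slow(), which inspects the
 * annotation words bit by bit. */
static uint32_t
slow_parse_stub(struct rte_mbuf *m, const void *annotation)
{
	(void)m;
	(void)annotation;
	return RTE_PTYPE_UNKNOWN;
}

/* Fast path: trust the hardware parse summary carried in the FRC;
 * anything unknown drops down to annotation inspection. */
static uint32_t
parse_dispatch(struct rte_mbuf *m, uint16_t frc, const void *annotation)
{
	switch (frc) {
	case EXAMPLE_FRC_IPV4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
		       RTE_PTYPE_L4_UDP;
	default:
		return slow_parse_stub(m, annotation);
	}
}
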
@@ -135,6 +157,11 @@ dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation)
 		goto parse_done;
 	}
 
+	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
+		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
+		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+
 	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
 				L3_IP_1_MORE_FRAGMENT |
 				L3_IP_N_FIRST_FRAGMENT |
@@ -173,16 +200,15 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
 	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
 			   annotation->word4);
 
-	/* Check offloads first */
-	if (BIT_ISSET_AT_POS(annotation->word3,
-			     L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
-		mbuf->ol_flags |= PKT_RX_VLAN;
-
 	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
 		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
 	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
 		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
 
+	/* Check detailed parsing requirement */
+	if (annotation->word3 & 0x7FFFFC3FFFF)
+		return dpaa2_dev_rx_parse_slow(mbuf, annotation);
+
 	/* Return some common types from parse processing */
 	switch (annotation->word4) {
 	case DPAA2_L3_IPv4:
@@ -205,7 +231,7 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
 		break;
 	}
 
-	return dpaa2_dev_rx_parse_slow(annotation);
+	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
 }
 
 static inline struct rte_mbuf *__attribute__((hot))
@@ -236,8 +262,7 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
 	first_seg->nb_segs = 1;
 	first_seg->next = NULL;
 	if (dpaa2_svr_family == SVR_LX2160A)
-		dpaa2_dev_rx_parse_frc(first_seg,
-				DPAA2_GET_FD_FRC_PARSE_SUM(fd));
+		dpaa2_dev_rx_parse_new(first_seg, fd);
 	else
 		first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
 			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
@@ -293,7 +318,7 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
 	 */
 
 	if (dpaa2_svr_family == SVR_LX2160A)
-		dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
+		dpaa2_dev_rx_parse_new(mbuf, fd);
 	else
 		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
 			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
@@ -317,12 +342,6 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 	struct qbman_sge *sgt, *sge = NULL;
 	int i;
 
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		int ret = rte_vlan_insert(&mbuf);
-		if (ret)
-			return ret;
-	}
-
 	temp = rte_pktmbuf_alloc(mbuf->pool);
 	if (temp == NULL) {
 		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
@@ -389,13 +408,6 @@ static void __attribute__ ((noinline)) __attribute__((hot))
 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 	       struct qbman_fd *fd, uint16_t bpid)
 {
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		if (rte_vlan_insert(&mbuf)) {
-			rte_pktmbuf_free(mbuf);
-			return;
-		}
-	}
-
 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
 
 	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
@@ -428,12 +440,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 	struct rte_mbuf *m;
 	void *mb = NULL;
 
-	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-		int ret = rte_vlan_insert(&mbuf);
-		if (ret)
-			return ret;
-	}
-
 	if (rte_dpaa2_mbuf_alloc_bulk(
 		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
 		DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n");
@@ -466,6 +472,12 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 	return 0;
 }
 
+/* This function assumes that the caller keeps the same value of nb_pkts
+ * across calls for a given queue; if that is not the case, it is better to
+ * use the non-prefetch version of the Rx call.
+ * It returns the number of packets requested in the previous call, without
+ * honoring the current nb_pkts or the size of the bufs array.
+ */
 uint16_t
 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
@@ -473,7 +485,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
 	struct qbman_result *dq_storage, *dq_storage1 = NULL;
 	uint32_t fqid = dpaa2_q->fqid;
-	int ret, num_rx = 0;
+	int ret, num_rx = 0, pull_size;
 	uint8_t pending, status;
 	struct qbman_swp *swp;
 	const struct qbman_fd *fd, *next_fd;
@@ -481,30 +493,31 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
 	struct rte_eth_dev *dev = dpaa2_q->dev;
 
-	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
-		ret = dpaa2_affine_qbman_swp();
+	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
+		ret = dpaa2_affine_qbman_ethrx_swp();
 		if (ret) {
 			DPAA2_PMD_ERR("Failure in affining portal");
 			return 0;
 		}
 	}
-	swp = DPAA2_PER_LCORE_PORTAL;
+	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
+	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
 
 	if (unlikely(!q_storage->active_dqs)) {
 		q_storage->toggle = 0;
 		dq_storage = q_storage->dq_storage[q_storage->toggle];
-		q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
-					       DPAA2_DQRR_RING_SIZE : nb_pkts;
+		q_storage->last_num_pkts = pull_size;
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_numframes(&pulldesc, q_storage->last_num_pkts);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
-			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
 			while (!qbman_check_command_complete(
-			       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+			       get_swp_active_dqs(
+			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
 				;
-			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
 		}
 		while (1) {
 			if (qbman_swp_pull(swp, &pulldesc)) {
@@ -516,8 +529,9 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			break;
 		}
 		q_storage->active_dqs = dq_storage;
-		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
-		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
+		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
+				   dq_storage);
 	}
 
 	dq_storage = q_storage->active_dqs;
@@ -530,10 +544,10 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	q_storage->toggle ^= 1;
 	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
 	qbman_pull_desc_clear(&pulldesc);
-	qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
+	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
-		(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
 
 	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
@@ -565,10 +579,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		}
 		fd = qbman_result_DQ_fd(dq_storage);
 
-		next_fd = qbman_result_DQ_fd(dq_storage + 1);
-		/* Prefetch Annotation address for the parse results */
-		rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd)
-				+ DPAA2_FD_PTA_SIZE + 16));
+		if (dpaa2_svr_family != SVR_LX2160A) {
+			next_fd = qbman_result_DQ_fd(dq_storage + 1);
+			/* Prefetch Annotation address for the parse results */
+			rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(
+				      next_fd) + DPAA2_FD_PTA_SIZE + 16));
+		}
 
 		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
 			bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
@@ -576,18 +592,18 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			bufs[num_rx] = eth_fd_to_mbuf(fd);
 		bufs[num_rx]->port = dev->data->port_id;
 
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			rte_vlan_strip(bufs[num_rx]);
 
 		dq_storage++;
 		num_rx++;
 	} while (pending);
 
-	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
 		while (!qbman_check_command_complete(
-		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
 			;
-		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
 	}
 	/* issue a volatile dequeue command for next pull */
 	while (1) {
@@ -599,8 +615,8 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		break;
 	}
 	q_storage->active_dqs = dq_storage1;
-	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
-	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
+	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
 
 	dpaa2_q->rx_pkts += num_rx;
@@ -695,7 +711,6 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	/*Prepare enqueue descriptor*/
 	qbman_eq_desc_clear(&eqdesc);
 	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
-	qbman_eq_desc_set_response(&eqdesc, 0, 0);
 	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
 			     dpaa2_q->flow_id, dpaa2_q->tc_index);
 	/*Clear the unused FD fields before sending*/
@@ -709,7 +724,8 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			goto skip_tx;
 		}
 
-		frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
+		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_pkts;
 
 		for (loop = 0; loop < frames_to_send; loop++) {
 			if ((*bufs)->seqn) {
@@ -722,9 +738,6 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
 			}
 
-			fd_arr[loop].simple.frc = 0;
-			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
-			DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL);
 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
 				mp = (*bufs)->pool;
 				/* Check the basic scenario and set
@@ -734,8 +747,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 				    priv->bp_list->dpaa2_ops_index &&
 				    (*bufs)->nb_segs == 1 &&
 				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
-					if (unlikely((*bufs)->ol_flags
-						& PKT_TX_VLAN_PKT)) {
+					if (unlikely(((*bufs)->ol_flags
+						& PKT_TX_VLAN_PKT) ||
+						(dev->data->dev_conf.txmode.offloads
+						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
 						ret = rte_vlan_insert(bufs);
 						if (ret)
 							goto send_n_return;
@@ -755,6 +770,13 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 					goto send_n_return;
 				}
 
+				if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+					(dev->data->dev_conf.txmode.offloads
+					& DEV_TX_OFFLOAD_VLAN_INSERT))) {
+					int ret = rte_vlan_insert(bufs);
+					if (ret)
+						goto send_n_return;
+				}
 				if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
 					DPAA2_PMD_WARN("Non DPAA2 buffer pool");
 					/* alloc should be from the default buffer pool
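
The comment added before dpaa2_dev_prefetch_rx() above describes a double-buffered dequeue: each call harvests the volatile dequeue issued on the previous call and immediately launches the next one, so the pull completes while the application processes the current burst. A hedged sketch of that pattern, where issue_pull() and harvest() are stand-ins for qbman_swp_pull() and the qbman result-processing loop:

#include <stdint.h>

struct pull_state {
	void *storage[2];       /* two result rings, toggled per call */
	int toggle;
	uint16_t last_num_pkts; /* size of the pull already in flight */
};

extern void issue_pull(void *storage, uint16_t num_frames);
extern uint16_t harvest(void *storage, void **pkts, uint16_t max);

static uint16_t
prefetch_rx(struct pull_state *s, void **pkts, uint16_t nb_pkts)
{
	/* Up to last_num_pkts frames arrive here, whatever nb_pkts is
	 * now -- hence the requirement that callers keep nb_pkts
	 * constant per queue. */
	uint16_t n = harvest(s->storage[s->toggle], pkts, s->last_num_pkts);

	/* Flip buffers and start the pull for the next call. */
	s->toggle ^= 1;
	issue_pull(s->storage[s->toggle], nb_pkts);
	s->last_num_pkts = nb_pkts;

	return n;
}
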
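On the Tx side, the old burst computation (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts behaves as a clamp only when MAX_TX_RING_SLOTS is exactly eight; the patch replaces it with a plain comparison against the runtime ring size (dpaa2_eqcr_size, with dpaa2_dqrr_size playing the same role for the Rx pull_size), which also lets SoCs with larger rings use their full depth. A minimal sketch of the clamp, assuming only that the ring size is probed at runtime:

#include <stdint.h>

/* Cap a requested burst at the hardware ring size; in the patch the
 * limit (dpaa2_eqcr_size / dpaa2_dqrr_size) is a per-SoC runtime
 * value, here it is simply a parameter. */
static inline uint16_t
clamp_burst(uint16_t nb_pkts, uint16_t ring_size)
{
	return (nb_pkts > ring_size) ? ring_size : nb_pkts;
}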