X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fiavf%2Fiavf_rxtx.c;h=88661e5d74aaa4f3e5d40ed70a8cd51e8c4c6ddd;hb=7483341ae5533c5d5fa080a5d229e6f2daf03ea5;hp=1ddbad0f1f287cb0255f524a3f3a6d8957ad547f;hpb=82fca5d18b6d05fa9800a0172b55f0f6f57825b5;p=dpdk.git diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index 1ddbad0f1f..88661e5d74 100644 --- a/drivers/net/iavf/iavf_rxtx.c +++ b/drivers/net/iavf/iavf_rxtx.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -57,6 +57,39 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type) rxdid_map[flex_type] : IAVF_RXDID_COMMS_OVS_1; } +static int +iavf_monitor_callback(const uint64_t value, + const uint64_t arg[RTE_POWER_MONITOR_OPAQUE_SZ] __rte_unused) +{ + const uint64_t m = rte_cpu_to_le_64(1 << IAVF_RX_DESC_STATUS_DD_SHIFT); + /* + * we expect the DD bit to be set to 1 if this descriptor was already + * written to. + */ + return (value & m) == m ? -1 : 0; +} + +int +iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc) +{ + struct iavf_rx_queue *rxq = rx_queue; + volatile union iavf_rx_desc *rxdp; + uint16_t desc; + + desc = rxq->rx_tail; + rxdp = &rxq->rx_ring[desc]; + /* watch for changes in status bit */ + pmc->addr = &rxdp->wb.qword1.status_error_len; + + /* comparison callback */ + pmc->fn = iavf_monitor_callback; + + /* registers are 64-bit */ + pmc->size = sizeof(uint64_t); + + return 0; +} + static inline int check_rx_thresh(uint16_t nb_desc, uint16_t thresh) { @@ -135,7 +168,7 @@ check_rx_vec_allow(struct iavf_rx_queue *rxq) static inline bool check_tx_vec_allow(struct iavf_tx_queue *txq) { - if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) && + if (!(txq->offloads & IAVF_TX_NO_VECTOR_FLAGS) && txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST && txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) { PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq."); @@ -192,6 +225,10 @@ reset_rx_queue(struct iavf_rx_queue *rxq) rxq->rx_tail = 0; rxq->nb_rx_hold = 0; + + if (rxq->pkt_first_seg != NULL) + rte_pktmbuf_free(rxq->pkt_first_seg); + rxq->pkt_first_seg = NULL; rxq->pkt_last_seg = NULL; rxq->rxrearm_nb = 0; @@ -241,11 +278,15 @@ alloc_rxq_mbufs(struct iavf_rx_queue *rxq) volatile union iavf_rx_desc *rxd; struct rte_mbuf *mbuf = NULL; uint64_t dma_addr; - uint16_t i; + uint16_t i, j; for (i = 0; i < rxq->nb_rx_desc; i++) { mbuf = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(!mbuf)) { + for (j = 0; j < i; j++) { + rte_pktmbuf_free_seg(rxq->sw_ring[j]); + rxq->sw_ring[j] = NULL; + } PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX"); return -ENOMEM; } @@ -498,9 +539,12 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint8_t proto_xtr; uint16_t len; uint16_t rx_free_thresh; + uint64_t offloads; PMD_INIT_FUNC_TRACE(); + offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads; + if (nb_desc % IAVF_ALIGN_RING_DESC != 0 || nb_desc > IAVF_MAX_RING_DESC || nb_desc < IAVF_MIN_RING_DESC) { @@ -518,7 +562,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, /* Free memory if needed */ if (dev->data->rx_queues[queue_idx]) { - iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]); + iavf_dev_rx_queue_release(dev, queue_idx); dev->data->rx_queues[queue_idx] = NULL; } @@ -543,6 +587,24 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, rxq->proto_xtr = IAVF_PROTO_XTR_NONE; } + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) { + struct virtchnl_vlan_supported_caps 
*stripping_support = + &vf->vlan_v2_caps.offloads.stripping_support; + uint32_t stripping_cap; + + if (stripping_support->outer) + stripping_cap = stripping_support->outer; + else + stripping_cap = stripping_support->inner; + + if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (stripping_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2) + rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2; + } else { + rxq->rx_flags = IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1; + } + iavf_select_rxd_to_pkt_fields_handler(rxq, rxq->rxdid); rxq->mp = mp; @@ -553,6 +615,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, rxq->rx_deferred_start = rx_conf->rx_deferred_start; rxq->rx_hdr_len = 0; rxq->vsi = vsi; + rxq->offloads = offloads; if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) rxq->crc_len = RTE_ETHER_CRC_LEN; @@ -560,7 +623,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, rxq->crc_len = 0; len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM; - rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT)); + rxq->rx_buf_len = RTE_ALIGN_FLOOR(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT)); /* Allocate the software ring. */ len = nb_desc + IAVF_RX_MAX_BURST; @@ -629,6 +692,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, const struct rte_eth_txconf *tx_conf) { struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct iavf_info *vf = + IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct iavf_tx_queue *txq; const struct rte_memzone *mz; uint32_t ring_size; @@ -651,11 +716,12 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH); tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH); - check_tx_thresh(nb_desc, tx_rs_thresh, tx_rs_thresh); + if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0) + return -EINVAL; /* Free memory if needed. 
*/ if (dev->data->tx_queues[queue_idx]) { - iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]); + iavf_dev_tx_queue_release(dev, queue_idx); dev->data->tx_queues[queue_idx] = NULL; } @@ -670,6 +736,24 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, return -ENOMEM; } + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) { + struct virtchnl_vlan_supported_caps *insertion_support = + &vf->vlan_v2_caps.offloads.insertion_support; + uint32_t insertion_cap; + + if (insertion_support->outer) + insertion_cap = insertion_support->outer; + else + insertion_cap = insertion_support->inner; + + if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1) + txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1; + else if (insertion_cap & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2) + txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2; + } else { + txq->vlan_flag = IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1; + } + txq->nb_tx_desc = nb_desc; txq->rs_thresh = tx_rs_thresh; txq->free_thresh = tx_free_thresh; @@ -718,6 +802,22 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev, ad->tx_vec_allowed = false; } + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS && + vf->tm_conf.committed) { + int tc; + for (tc = 0; tc < vf->qos_cap->num_elem; tc++) { + if (txq->queue_id >= vf->qtc_map[tc].start_queue_id && + txq->queue_id < (vf->qtc_map[tc].start_queue_id + + vf->qtc_map[tc].queue_count)) + break; + } + if (tc >= vf->qos_cap->num_elem) { + PMD_INIT_LOG(ERR, "Queue TC mapping is not correct"); + return -EINVAL; + } + txq->tc = tc; + } + return 0; } @@ -756,12 +856,14 @@ iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) else err = iavf_switch_queue_lv(adapter, rx_queue_id, true, true); - if (err) + if (err) { + release_rxq_mbufs(rxq); PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", rx_queue_id); - else + } else { dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + } return err; } @@ -860,9 +962,9 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) } void -iavf_dev_rx_queue_release(void *rxq) +iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) { - struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq; + struct iavf_rx_queue *q = dev->data->rx_queues[qid]; if (!q) return; @@ -874,9 +976,9 @@ iavf_dev_rx_queue_release(void *rxq) } void -iavf_dev_tx_queue_release(void *txq) +iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) { - struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq; + struct iavf_tx_queue *q = dev->data->tx_queues[qid]; if (!q) return; @@ -952,31 +1054,27 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp) static inline void iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb, - volatile union iavf_rx_flex_desc *rxdp) + volatile union iavf_rx_flex_desc *rxdp, + uint8_t rx_flags) { - if (rte_le_to_cpu_64(rxdp->wb.status_error0) & - (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) { - mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; - mb->vlan_tci = - rte_le_to_cpu_16(rxdp->wb.l2tag1); - } else { - mb->vlan_tci = 0; - } + uint16_t vlan_tci = 0; + + if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1 && + rte_le_to_cpu_64(rxdp->wb.status_error0) & + (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) + vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag1); #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC - if (rte_le_to_cpu_16(rxdp->wb.status_error1) & - (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) { - mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ | - PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN; - mb->vlan_tci_outer = 
mb->vlan_tci; - mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd); - PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", - rte_le_to_cpu_16(rxdp->wb.l2tag2_1st), - rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd)); - } else { - mb->vlan_tci_outer = 0; - } + if (rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2 && + rte_le_to_cpu_16(rxdp->wb.status_error1) & + (1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S)) + vlan_tci = rte_le_to_cpu_16(rxdp->wb.l2tag2_2nd); #endif + + if (vlan_tci) { + mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + mb->vlan_tci = vlan_tci; + } } /* Translate the rx descriptor status and error fields to pkt flags */ @@ -1075,7 +1173,7 @@ iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0) flags |= PKT_RX_L4_CKSUM_GOOD; if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))) - flags |= PKT_RX_EIP_CKSUM_BAD; + flags |= PKT_RX_OUTER_IP_CKSUM_BAD; return flags; } @@ -1098,7 +1196,7 @@ iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id) rxq->port_id, rxq->queue_id, rx_id, nb_hold); rx_id = (uint16_t)((rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1)); - IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id); + IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id); nb_hold = 0; } rxq->nb_rx_hold = nb_hold; @@ -1155,6 +1253,7 @@ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxd = *rxdp; nb_hold++; rxe = rxq->sw_ring[rx_id]; + rxq->sw_ring[rx_id] = nmb; rx_id++; if (unlikely(rx_id == rxq->nb_rx_desc)) rx_id = 0; @@ -1260,6 +1359,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue, rxd = *rxdp; nb_hold++; rxe = rxq->sw_ring[rx_id]; + rxq->sw_ring[rx_id] = nmb; rx_id++; if (unlikely(rx_id == rxq->nb_rx_desc)) rx_id = 0; @@ -1294,7 +1394,7 @@ iavf_recv_pkts_flex_rxd(void *rx_queue, rxm->ol_flags = 0; rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; - iavf_flex_rxd_to_vlan_tci(rxm, &rxd); + iavf_flex_rxd_to_vlan_tci(rxm, &rxd, rxq->rx_flags); rxq->rxd_to_pkt_fields(rxq, rxm, &rxd); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); rxm->ol_flags |= pkt_flags; @@ -1351,6 +1451,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, rxd = *rxdp; nb_hold++; rxe = rxq->sw_ring[rx_id]; + rxq->sw_ring[rx_id] = nmb; rx_id++; if (rx_id == rxq->nb_rx_desc) rx_id = 0; @@ -1435,7 +1536,7 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts, first_seg->ol_flags = 0; first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)]; - iavf_flex_rxd_to_vlan_tci(first_seg, &rxd); + iavf_flex_rxd_to_vlan_tci(first_seg, &rxd, rxq->rx_flags); rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0); @@ -1504,6 +1605,7 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rxd = *rxdp; nb_hold++; rxe = rxq->sw_ring[rx_id]; + rxq->sw_ring[rx_id] = nmb; rx_id++; if (rx_id == rxq->nb_rx_desc) rx_id = 0; @@ -1672,7 +1774,7 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq) mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M & rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)]; - iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]); + iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j], rxq->rx_flags); rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]); stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0); pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0); @@ -1843,7 +1945,7 @@ iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq) /* Update rx tail register */ rte_wmb(); - 
IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger); + IAVF_PCI_REG_WC_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger); rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh); @@ -1945,9 +2047,9 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq) if ((txd[desc_to_clean_to].cmd_type_offset_bsz & rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) != rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) { - PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done " - "(port=%d queue=%d)", desc_to_clean_to, - txq->port_id, txq->queue_id); + PMD_TX_LOG(DEBUG, "TX descriptor %4u is not done " + "(port=%d queue=%d)", desc_to_clean_to, + txq->port_id, txq->queue_id); return -1; } @@ -1968,11 +2070,14 @@ iavf_xmit_cleanup(struct iavf_tx_queue *txq) /* Check if the context descriptor is needed for TX offloading */ static inline uint16_t -iavf_calc_context_desc(uint64_t flags) +iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag) { - static uint64_t mask = PKT_TX_TCP_SEG; - - return (flags & mask) ? 1 : 0; + if (flags & PKT_TX_TCP_SEG) + return 1; + if (flags & PKT_TX_VLAN_PKT && + vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) + return 1; + return 0; } static inline void @@ -2093,6 +2198,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) uint16_t tx_last; uint16_t slen; uint64_t buf_dma_addr; + uint16_t cd_l2tag2 = 0; union iavf_tx_offload tx_offload = {0}; txq = tx_queue; @@ -2119,7 +2225,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) tx_offload.l4_len = tx_pkt->l4_len; tx_offload.tso_segsz = tx_pkt->tso_segsz; /* Calculate the number of context descriptors needed. */ - nb_ctx = iavf_calc_context_desc(ol_flags); + nb_ctx = iavf_calc_context_desc(ol_flags, txq->vlan_flag); /* The number of descriptors that must be allocated for * a packet equals to the number of the segments of that @@ -2154,7 +2260,8 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) } /* Descriptor based VLAN insertion */ - if (ol_flags & PKT_TX_VLAN_PKT) { + if (ol_flags & PKT_TX_VLAN_PKT && + txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) { td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1; td_tag = tx_pkt->vlan_tci; } @@ -2177,6 +2284,11 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) (volatile struct iavf_tx_context_desc *) &txr[tx_id]; + /* clear QW0 or the previous writeback value + * may impact next write + */ + *(volatile uint64_t *)ctx_txd = 0; + txn = &sw_ring[txe->next_id]; RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); if (txe->mbuf) { @@ -2189,8 +2301,16 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) cd_type_cmd_tso_mss |= iavf_set_tso_ctx(tx_pkt, tx_offload); + if (ol_flags & PKT_TX_VLAN_PKT && + txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) { + cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 + << IAVF_TXD_CTX_QW1_CMD_SHIFT; + cd_l2tag2 = tx_pkt->vlan_tci; + } + ctx_txd->type_cmd_tso_mss = rte_cpu_to_le_64(cd_type_cmd_tso_mss); + ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2); IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id); txe->last_id = tx_last; @@ -2251,12 +2371,33 @@ end_of_tx: PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", txq->port_id, txq->queue_id, tx_id, nb_tx); - IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id); + IAVF_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id); txq->tx_tail = tx_id; return nb_tx; } +/* Check if the packet with vlan user priority is transmitted in the + * correct queue. 
+ */ +static int +iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m) +{ + struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id]; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + uint16_t up; + + up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET; + + if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) { + PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u\n", + up, txq->queue_id); + return -1; + } else { + return 0; + } +} + /* TX prep functions */ uint16_t iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, @@ -2265,6 +2406,9 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, int i, ret; uint64_t ol_flags; struct rte_mbuf *m; + struct iavf_tx_queue *txq = tx_queue; + struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id]; + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); for (i = 0; i < nb_pkts; i++) { m = tx_pkts[i]; @@ -2288,7 +2432,7 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, return i; } -#ifdef RTE_LIBRTE_ETHDEV_DEBUG +#ifdef RTE_ETHDEV_DEBUG_TX ret = rte_validate_tx_offload(m); if (ret != 0) { rte_errno = -ret; @@ -2300,6 +2444,15 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, rte_errno = -ret; return i; } + + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS && + ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN)) { + ret = iavf_check_vlan_up2tc(txq, m); + if (ret != 0) { + rte_errno = -ret; + return i; + } + } } return i; @@ -2316,22 +2469,19 @@ iavf_set_rx_function(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 struct iavf_rx_queue *rxq; int i; + int check_ret; bool use_avx2 = false; -#ifdef CC_AVX512_SUPPORT bool use_avx512 = false; -#endif - - if (!iavf_rx_vec_dev_check(dev) && - rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { - for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - (void)iavf_rxq_vec_setup(rxq); - } + bool use_flex = false; + check_ret = iavf_rx_vec_dev_check(dev); + if (check_ret >= 0 && + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && - rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) use_avx2 = true; + #ifdef CC_AVX512_SUPPORT if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 && rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 && @@ -2339,61 +2489,110 @@ iavf_set_rx_function(struct rte_eth_dev *dev) use_avx512 = true; #endif + if (vf->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) + use_flex = true; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + (void)iavf_rxq_vec_setup(rxq); + } + if (dev->data->scattered_rx) { - PMD_DRV_LOG(DEBUG, - "Using %sVector Scattered Rx (port %d).", - use_avx2 ? "avx2 " : "", - dev->data->port_id); - if (vf->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) { + if (!use_avx512) { + PMD_DRV_LOG(DEBUG, + "Using %sVector Scattered Rx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + } else { + if (check_ret == IAVF_VECTOR_PATH) + PMD_DRV_LOG(DEBUG, + "Using AVX512 Vector Scattered Rx (port %d).", + dev->data->port_id); + else + PMD_DRV_LOG(DEBUG, + "Using AVX512 OFFLOAD Vector Scattered Rx (port %d).", + dev->data->port_id); + } + if (use_flex) { dev->rx_pkt_burst = use_avx2 ? 
iavf_recv_scattered_pkts_vec_avx2_flex_rxd : iavf_recv_scattered_pkts_vec_flex_rxd; #ifdef CC_AVX512_SUPPORT - if (use_avx512) - dev->rx_pkt_burst = - iavf_recv_scattered_pkts_vec_avx512_flex_rxd; + if (use_avx512) { + if (check_ret == IAVF_VECTOR_PATH) + dev->rx_pkt_burst = + iavf_recv_scattered_pkts_vec_avx512_flex_rxd; + else + dev->rx_pkt_burst = + iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload; + } #endif } else { dev->rx_pkt_burst = use_avx2 ? iavf_recv_scattered_pkts_vec_avx2 : iavf_recv_scattered_pkts_vec; #ifdef CC_AVX512_SUPPORT - if (use_avx512) - dev->rx_pkt_burst = - iavf_recv_scattered_pkts_vec_avx512; + if (use_avx512) { + if (check_ret == IAVF_VECTOR_PATH) + dev->rx_pkt_burst = + iavf_recv_scattered_pkts_vec_avx512; + else + dev->rx_pkt_burst = + iavf_recv_scattered_pkts_vec_avx512_offload; + } #endif } } else { - PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).", - use_avx2 ? "avx2 " : "", - dev->data->port_id); - if (vf->vf_res->vf_cap_flags & - VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) { + if (!use_avx512) { + PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + } else { + if (check_ret == IAVF_VECTOR_PATH) + PMD_DRV_LOG(DEBUG, + "Using AVX512 Vector Rx (port %d).", + dev->data->port_id); + else + PMD_DRV_LOG(DEBUG, + "Using AVX512 OFFLOAD Vector Rx (port %d).", + dev->data->port_id); + } + if (use_flex) { dev->rx_pkt_burst = use_avx2 ? iavf_recv_pkts_vec_avx2_flex_rxd : iavf_recv_pkts_vec_flex_rxd; #ifdef CC_AVX512_SUPPORT - if (use_avx512) - dev->rx_pkt_burst = - iavf_recv_pkts_vec_avx512_flex_rxd; + if (use_avx512) { + if (check_ret == IAVF_VECTOR_PATH) + dev->rx_pkt_burst = + iavf_recv_pkts_vec_avx512_flex_rxd; + else + dev->rx_pkt_burst = + iavf_recv_pkts_vec_avx512_flex_rxd_offload; + } #endif } else { dev->rx_pkt_burst = use_avx2 ? iavf_recv_pkts_vec_avx2 : iavf_recv_pkts_vec; #ifdef CC_AVX512_SUPPORT - if (use_avx512) - dev->rx_pkt_burst = - iavf_recv_pkts_vec_avx512; + if (use_avx512) { + if (check_ret == IAVF_VECTOR_PATH) + dev->rx_pkt_burst = + iavf_recv_pkts_vec_avx512; + else + dev->rx_pkt_burst = + iavf_recv_pkts_vec_avx512_offload; + } #endif } } return; } -#endif +#endif if (dev->data->scattered_rx) { PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).", dev->data->port_id); @@ -2422,17 +2621,23 @@ iavf_set_tx_function(struct rte_eth_dev *dev) #ifdef RTE_ARCH_X86 struct iavf_tx_queue *txq; int i; + int check_ret; + bool use_sse = false; bool use_avx2 = false; -#ifdef CC_AVX512_SUPPORT bool use_avx512 = false; -#endif - if (!iavf_tx_vec_dev_check(dev) && - rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { - if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || - rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && - rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) - use_avx2 = true; + check_ret = iavf_tx_vec_dev_check(dev); + + if (check_ret >= 0 && + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) { + /* SSE and AVX2 not support offload path yet. 
*/ + if (check_ret == IAVF_VECTOR_PATH) { + use_sse = true; + if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 || + rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) && + rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256) + use_avx2 = true; + } #ifdef CC_AVX512_SUPPORT if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 && rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1 && @@ -2440,17 +2645,32 @@ iavf_set_tx_function(struct rte_eth_dev *dev) use_avx512 = true; #endif - PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", - use_avx2 ? "avx2 " : "", - dev->data->port_id); - dev->tx_pkt_burst = use_avx2 ? - iavf_xmit_pkts_vec_avx2 : - iavf_xmit_pkts_vec; + if (!use_sse && !use_avx2 && !use_avx512) + goto normal; + + if (!use_avx512) { + PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).", + use_avx2 ? "avx2 " : "", + dev->data->port_id); + dev->tx_pkt_burst = use_avx2 ? + iavf_xmit_pkts_vec_avx2 : + iavf_xmit_pkts_vec; + } + dev->tx_pkt_prepare = NULL; #ifdef CC_AVX512_SUPPORT - if (use_avx512) - dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512; + if (use_avx512) { + if (check_ret == IAVF_VECTOR_PATH) { + dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512; + PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).", + dev->data->port_id); + } else { + dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload; + dev->tx_pkt_prepare = iavf_prep_pkts; + PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).", + dev->data->port_id); + } + } #endif - dev->tx_pkt_prepare = NULL; for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; @@ -2468,8 +2688,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev) return; } -#endif +normal: +#endif PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).", dev->data->port_id); dev->tx_pkt_burst = iavf_xmit_pkts;
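
Usage note: the iavf_get_monitor_addr()/iavf_monitor_callback() pair added by this patch exposes the RX descriptor DD bit to DPDK's power-management intrinsics. The following is a minimal, illustrative sketch of the application side, not part of the patch; the port/queue ids and the ~1 ms timeout are assumptions, and both rte_eth_get_monitor_addr() and rte_power_monitor() are experimental API at this point in DPDK.

#include <rte_ethdev.h>
#include <rte_power_intrinsics.h>
#include <rte_cycles.h>

/* Sleep on an empty RX queue until a descriptor is written back
 * or roughly 1 ms elapses (illustrative timeout).
 */
static void
wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_power_monitor_cond pmc;

	/* The PMD fills pmc with the DD-bit address, compare callback and size
	 * (iavf_get_monitor_addr() above for the iavf driver).
	 */
	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) != 0)
		return; /* monitoring not supported on this queue */

	/* UMWAIT until the monitored address changes or the TSC deadline hits;
	 * effectively a no-op (-ENOTSUP) on CPUs without WAITPKG support.
	 */
	rte_power_monitor(&pmc, rte_rdtsc() + rte_get_tsc_hz() / 1000);
}

In practice most applications would not call these directly but enable the librte_power PMD power-management mode on the queue, which performs the same sequence internally.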