X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Faxgbe%2Faxgbe_rxtx.c;h=e34bb6d448fc6b50d85f103cbb915abd7f03bd3d;hb=1b05c5b2b4cb146ff9b08a060f746f7b66708d76;hp=032e3cebceb7f575d9cee9356367b51bee6bfc0f;hpb=2b11056d1ef079d44844df2e6568ced372574e4e;p=dpdk.git

diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
index 032e3cebce..e34bb6d448 100644
--- a/drivers/net/axgbe/axgbe_rxtx.c
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -209,9 +209,10 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	volatile union axgbe_rx_desc *desc;
 	uint64_t old_dirty = rxq->dirty;
 	struct rte_mbuf *mbuf, *tmbuf;
-	unsigned int err;
+	unsigned int err, etlt;
 	uint32_t error_status;
 	uint16_t idx, pidx, pkt_len;
+	uint64_t offloads;
 
 	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
 	while (nb_rx < nb_pkts) {
@@ -276,6 +277,26 @@ axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		/* Get the RSS hash */
 		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
 			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
+				RX_NORMAL_DESC3, ETLT);
+		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
+		if (!err || !etlt) {
+			if (etlt == RX_CVLAN_TAG_PRESENT) {
+				mbuf->ol_flags |= PKT_RX_VLAN;
+				mbuf->vlan_tci =
+					AXGMAC_GET_BITS_LE(desc->write.desc0,
+							RX_NORMAL_DESC0, OVT);
+				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
+				else
+					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
+			} else {
+				mbuf->ol_flags &=
+					~(PKT_RX_VLAN
+					| PKT_RX_VLAN_STRIPPED);
+				mbuf->vlan_tci = 0;
+			}
+		}
 		/* Indicate if a Context Descriptor is next */
 		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
 			mbuf->ol_flags |= PKT_RX_IEEE1588_PTP
@@ -324,9 +345,10 @@ uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
 	uint64_t old_dirty = rxq->dirty;
 	struct rte_mbuf *first_seg = NULL;
 	struct rte_mbuf *mbuf, *tmbuf;
-	unsigned int err;
+	unsigned int err, etlt;
 	uint32_t error_status;
 	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
+	uint64_t offloads;
 
 	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
 	while (nb_rx < nb_pkts) {
@@ -399,7 +421,25 @@ next_desc:
 		/* Get the RSS hash */
 		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
 			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
-
+		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
+				RX_NORMAL_DESC3, ETLT);
+		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
+		if (!err || !etlt) {
+			if (etlt == RX_CVLAN_TAG_PRESENT) {
+				mbuf->ol_flags |= PKT_RX_VLAN;
+				mbuf->vlan_tci =
+					AXGMAC_GET_BITS_LE(desc->write.desc0,
+							RX_NORMAL_DESC0, OVT);
+				if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+					mbuf->ol_flags |= PKT_RX_VLAN_STRIPPED;
+				else
+					mbuf->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
+			} else {
+				mbuf->ol_flags &=
+					~(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+				mbuf->vlan_tci = 0;
+			}
+		}
 		/* Mbuf populate */
 		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 		mbuf->data_len = data_len;
@@ -492,6 +532,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	struct axgbe_tx_queue *txq;
 	unsigned int tsize;
 	const struct rte_memzone *tz;
+	uint64_t offloads;
 
 	tx_desc = nb_desc;
 	pdata = dev->data->dev_private;
@@ -511,7 +552,8 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (!txq)
 		return -ENOMEM;
 	txq->pdata = pdata;
-
+	offloads = tx_conf->offloads |
+		txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
 	txq->nb_desc = tx_desc;
 	txq->free_thresh = tx_conf->tx_free_thresh ?
 		tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
@@ -523,7 +565,7 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	if (txq->nb_desc % txq->free_thresh != 0)
 		txq->vector_disable = 1;
 
-	if (tx_conf->offloads != 0)
+	if (offloads != 0)
 		txq->vector_disable = 1;
 
 	/* Allocate TX ring hardware descriptors */
@@ -571,6 +613,34 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	return 0;
 }
 
+int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
+			char *fw_version, size_t fw_size)
+{
+	struct axgbe_port *pdata;
+	struct axgbe_hw_features *hw_feat;
+	int ret;
+
+	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
+	hw_feat = &pdata->hw_feat;
+
+	if (fw_version == NULL)
+		return -EINVAL;
+
+	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
+		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
+	if (ret < 0)
+		return -EINVAL;
+
+	ret += 1; /* add the size of '\0' */
+
+	if (fw_size < (size_t)ret)
+		return ret;
+	else
+		return 0;
+}
+
 static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
 				      unsigned int queue)
 {
@@ -745,10 +815,28 @@ static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
 	rte_wmb();
 
+	if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+		/* Mark it as a CONTEXT descriptor */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+				   CTXT, 1);
+		/* Set the VLAN tag */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+				   VT, mbuf->vlan_tci);
+		/* Indicate this descriptor contains the VLAN tag */
+		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
+				   VLTV, 1);
+		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
+				TX_NORMAL_DESC2_VLAN_INSERT);
+	} else {
+		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
+	}
+	rte_wmb();
+
 	/* Set OWN bit */
 	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
 	rte_wmb();
 
+	/* Save mbuf */
 	txq->sw_ring[idx] = mbuf;
 	/* Update current index*/
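
For reference, a minimal application-side sketch of how the features this diff touches would be exercised: RX VLAN stripping, VLAN insertion on transmit, and the new firmware-version query. This is not part of the patch; port_id, queue setup, device start and mbuf allocation are assumed to happen elsewhere, and the function name axgbe_vlan_usage_sketch is hypothetical. The flag and API names (DEV_RX_OFFLOAD_VLAN_STRIP, PKT_TX_VLAN_PKT, rte_eth_dev_fw_version_get) are the generic ethdev/mbuf symbols of this DPDK era, not driver-private ones.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hedged usage sketch: port_id and the mbuf 'm' are assumed to exist. */
static void axgbe_vlan_usage_sketch(uint16_t port_id, struct rte_mbuf *m)
{
	struct rte_eth_conf conf;
	char fw[32];

	memset(&conf, 0, sizeof(conf));

	/* Ask the PMD to strip the outer VLAN tag on receive; the RX paths
	 * above then set PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED and fill
	 * mbuf->vlan_tci from the OVT field of the write-back descriptor. */
	conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	rte_eth_dev_configure(port_id, 1, 1, &conf);
	/* ... rx/tx queue setup and rte_eth_dev_start() omitted ... */

	/* Request VLAN insertion on transmit: axgbe_xmit_hw() now emits a
	 * context descriptor carrying vlan_tci when this flag is set. */
	m->ol_flags |= PKT_TX_VLAN_PKT;
	m->vlan_tci = 100;

	/* The new axgbe_dev_fw_version_get() is reached through the
	 * generic ethdev call. */
	rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
}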