net/ngbe: support CRC offload
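
With this change the PMD advertises RTE_ETH_RX_OFFLOAD_KEEP_CRC; when an
application requests it, the MAC no longer strips the 4-byte FCS and the Rx
paths below subtract rxq->crc_len from the hardware-reported length. A
minimal sketch of how an application would request the offload (the helper
name, port id and queue counts are placeholders, not taken from this patch):

#include <errno.h>
#include <rte_ethdev.h>

static int
enable_keep_crc(uint16_t port_id)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf conf = { 0 };
        int ret;

        ret = rte_eth_dev_info_get(port_id, &dev_info);
        if (ret != 0)
                return ret;

        /* Only request the offload if the port advertises it. */
        if ((dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC) == 0)
                return -ENOTSUP;
        conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;

        /* One Rx and one Tx queue, purely for illustration. */
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
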
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index d1b825d..6687d58 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -965,7 +965,8 @@ ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
                /* Translate descriptor info to mbuf format */
                for (j = 0; j < nb_dd; ++j) {
                        mb = rxep[j].mbuf;
-                       pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len);
+                       pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
+                                 rxq->crc_len;
                        mb->data_len = pkt_len;
                        mb->pkt_len = pkt_len;
 
@@ -1268,7 +1269,8 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *    - IP checksum flag,
                 *    - error flags.
                 */
-               pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len));
+               pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
+                                     rxq->crc_len);
                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
                rxm->nb_segs = 1;
@@ -1518,6 +1520,22 @@ next_desc:
                /* Initialize the first mbuf of the returned packet */
                ngbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
 
+               /* Deal with the case when HW CRC strip is disabled. */
+               first_seg->pkt_len -= rxq->crc_len;
+               if (unlikely(rxm->data_len <= rxq->crc_len)) {
+                       struct rte_mbuf *lp;
+
+                       for (lp = first_seg; lp->next != rxm; lp = lp->next)
+                               ;
+
+                       first_seg->nb_segs--;
+                       lp->data_len -= rxq->crc_len - rxm->data_len;
+                       lp->next = NULL;
+                       rte_pktmbuf_free_seg(rxm);
+               } else {
+                       rxm->data_len -= rxq->crc_len;
+               }
+
                /* Prefetch data of first segment, if configured to do so. */
                rte_packet_prefetch((char *)first_seg->buf_addr +
                        first_seg->data_off);
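
The hunk above covers the scattered Rx path, where the 4-byte FCS may sit
partly or entirely in the last buffer of a chain: when the last segment
holds no more than crc_len bytes it is dropped and the remainder of the CRC
is trimmed from the segment before it. A standalone sketch of the same
trimming logic, with a hypothetical helper name and assuming a well-formed
chain:

#include <rte_mbuf.h>

/* Hypothetical helper (not part of the driver) mirroring the trimming
 * above: 'head' is the first segment of a chain whose buffers still
 * contain the FCS, crc_len is RTE_ETHER_CRC_LEN when KEEP_CRC is set.
 */
static void
chain_strip_crc(struct rte_mbuf *head, uint16_t crc_len)
{
        struct rte_mbuf *last = head, *prev;

        head->pkt_len -= crc_len;
        while (last->next != NULL)
                last = last->next;

        if (last != head && last->data_len <= crc_len) {
                /* e.g. a 64-byte frame split 62 + 2: free the 2-byte
                 * tail and trim the remaining 2 CRC bytes from the
                 * previous segment, leaving a single 60-byte segment.
                 */
                for (prev = head; prev->next != last; prev = prev->next)
                        ;
                prev->data_len -= crc_len - last->data_len;
                prev->next = NULL;
                head->nb_segs--;
                rte_pktmbuf_free_seg(last);
        } else {
                last->data_len -= crc_len;
        }
}
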
@@ -1689,6 +1707,34 @@ ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq)
        }
 }
 
+static const struct {
+       eth_tx_burst_t pkt_burst;
+       const char *info;
+} ngbe_tx_burst_infos[] = {
+       { ngbe_xmit_pkts_simple,   "Scalar Simple"},
+       { ngbe_xmit_pkts,          "Scalar"},
+};
+
+int
+ngbe_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+                     struct rte_eth_burst_mode *mode)
+{
+       eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+       int ret = -EINVAL;
+       unsigned int i;
+
+       for (i = 0; i < RTE_DIM(ngbe_tx_burst_infos); ++i) {
+               if (pkt_burst == ngbe_tx_burst_infos[i].pkt_burst) {
+                       snprintf(mode->info, sizeof(mode->info), "%s",
+                                ngbe_tx_burst_infos[i].info);
+                       ret = 0;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 uint64_t
 ngbe_get_tx_port_offloads(struct rte_eth_dev *dev)
 {
@@ -1986,6 +2032,7 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev __rte_unused)
        offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM  |
                   RTE_ETH_RX_OFFLOAD_UDP_CKSUM   |
                   RTE_ETH_RX_OFFLOAD_TCP_CKSUM   |
+                  RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
                   RTE_ETH_RX_OFFLOAD_SCATTER;
 
        return offloads;
@@ -2026,6 +2073,10 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->queue_id = queue_idx;
        rxq->reg_idx = queue_idx;
        rxq->port_id = dev->data->port_id;
+       if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+               rxq->crc_len = RTE_ETHER_CRC_LEN;
+       else
+               rxq->crc_len = 0;
        rxq->drop_en = rx_conf->rx_drop_en;
        rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 
@@ -2239,6 +2290,36 @@ ngbe_set_rx_function(struct rte_eth_dev *dev)
        }
 }
 
+static const struct {
+       eth_rx_burst_t pkt_burst;
+       const char *info;
+} ngbe_rx_burst_infos[] = {
+       { ngbe_recv_pkts_sc_single_alloc,    "Scalar Scattered"},
+       { ngbe_recv_pkts_sc_bulk_alloc,      "Scalar Scattered Bulk Alloc"},
+       { ngbe_recv_pkts_bulk_alloc,         "Scalar Bulk Alloc"},
+       { ngbe_recv_pkts,                    "Scalar"},
+};
+
+int
+ngbe_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+                     struct rte_eth_burst_mode *mode)
+{
+       eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+       int ret = -EINVAL;
+       unsigned int i;
+
+       for (i = 0; i < RTE_DIM(ngbe_rx_burst_infos); ++i) {
+               if (pkt_burst == ngbe_rx_burst_infos[i].pkt_burst) {
+                       snprintf(mode->info, sizeof(mode->info), "%s",
+                                ngbe_rx_burst_infos[i].info);
+                       ret = 0;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 /*
  * Initializes Receive Unit.
  */
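
The Rx burst-mode query above, together with its Tx counterpart earlier in
the patch, hooks into the standard ethdev burst-mode introspection. A small
usage sketch (port and queue ids are placeholders) that prints which Rx and
Tx paths the driver selected, e.g. "Scalar Scattered Bulk Alloc" once
scattered Rx with bulk allocation is in use:

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_burst_modes(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_burst_mode mode;

        if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
                printf("port %u rxq %u: %s\n", port_id, queue_id, mode.info);
        if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
                printf("port %u txq %u: %s\n", port_id, queue_id, mode.info);
}
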
@@ -2251,6 +2332,7 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
        uint32_t fctrl;
        uint32_t hlreg0;
        uint32_t srrctl;
+       uint32_t rdrxctl;
        uint32_t rxcsum;
        uint16_t buf_size;
        uint16_t i;
@@ -2271,7 +2353,14 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
        fctrl |= NGBE_PSRCTL_BCA;
        wr32(hw, NGBE_PSRCTL, fctrl);
 
+       /*
+        * Configure CRC stripping, if any.
+        */
        hlreg0 = rd32(hw, NGBE_SECRXCTL);
+       if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+               hlreg0 &= ~NGBE_SECRXCTL_CRCSTRIP;
+       else
+               hlreg0 |= NGBE_SECRXCTL_CRCSTRIP;
        hlreg0 &= ~NGBE_SECRXCTL_XDSA;
        wr32(hw, NGBE_SECRXCTL, hlreg0);
 
@@ -2282,6 +2371,15 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
 
+               /*
+                * Reset crc_len in case it was changed after queue setup by a
+                * call to configure.
+                */
+               if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+                       rxq->crc_len = RTE_ETHER_CRC_LEN;
+               else
+                       rxq->crc_len = 0;
+
                /* Setup the Base and Length of the Rx Descriptor Rings */
                bus_addr = rxq->rx_ring_phys_addr;
                wr32(hw, NGBE_RXBAL(rxq->reg_idx),
@@ -2326,6 +2424,15 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
 
        wr32(hw, NGBE_PSRCTL, rxcsum);
 
+       if (hw->is_pf) {
+               rdrxctl = rd32(hw, NGBE_SECRXCTL);
+               if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+                       rdrxctl &= ~NGBE_SECRXCTL_CRCSTRIP;
+               else
+                       rdrxctl |= NGBE_SECRXCTL_CRCSTRIP;
+               wr32(hw, NGBE_SECRXCTL, rdrxctl);
+       }
+
        ngbe_set_rx_function(dev);
 
        return 0;
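
Net effect for applications, as a hedged illustration: with KEEP_CRC the
hardware leaves the FCS in the receive buffer while the Rx paths in this
patch subtract rxq->crc_len from the reported lengths, so for a
single-segment mbuf the 4 FCS bytes sit immediately after data_len bytes of
packet data. The helper name below is hypothetical:

#include <rte_mbuf.h>

/* Assumes KEEP_CRC is enabled and the frame fits in one segment; in the
 * scattered path a trailing CRC-only segment is freed, so the FCS is not
 * guaranteed to be reachable there.
 */
static const uint8_t *
frame_fcs(const struct rte_mbuf *m)
{
        if (m->nb_segs != 1)
                return NULL;
        /* FCS follows the data: data_len excludes RTE_ETHER_CRC_LEN. */
        return rte_pktmbuf_mtod_offset(m, const uint8_t *, m->data_len);
}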