ethdev: MTU accessors
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 40c8343..fca8fd7 100644
@@ -119,6 +119,9 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
+
+static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
 static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
@@ -134,8 +137,10 @@ static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
 
 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
-static int  ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
-               struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
+                              struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
+                              struct rte_eth_fc_conf *fc_conf);
 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
@@ -291,6 +296,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .stats_reset          = ixgbe_dev_stats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
@@ -307,6 +313,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
+       .flow_ctrl_get        = ixgbe_flow_ctrl_get,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
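
The .mtu_set and .flow_ctrl_get entries added to the ops table above are not called directly by applications; they are reached through the generic ethdev API, which looks up the port's ops table and dispatches to the PMD hook. A minimal sketch of that shape (example_set_mtu is a hypothetical helper, not the actual rte_ethdev.c wrapper, which also validates the port and uses its own error macros):

#include <errno.h>
#include <stdint.h>
#include <rte_ethdev.h>

static int
example_set_mtu(uint8_t port_id, uint16_t mtu)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

        /* rte_eth_dev_set_mtu() resolves to the PMD's .mtu_set hook,
         * here ixgbe_dev_mtu_set(); PMDs without one get -ENOTSUP. */
        if (dev->dev_ops->mtu_set == NULL)
                return -ENOTSUP;
        return (*dev->dev_ops->mtu_set)(dev, mtu);
}
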
@@ -1818,9 +1825,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        }
 
        /* Rx Errors */
-       stats->ierrors = total_missed_rx + hw_stats->crcerrs +
-               hw_stats->rlec;
-
+       stats->ibadcrc  = hw_stats->crcerrs;
+       stats->ibadlen  = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
+       stats->imissed  = total_missed_rx;
+       stats->ierrors  = stats->ibadcrc +
+                         stats->ibadlen +
+                         stats->imissed +
+                         hw_stats->illerrc + hw_stats->errbc;
+
+       /* Tx Errors */
        stats->oerrors  = 0;
 
        /* XON/XOFF pause frames */
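
With ierrors now broken down into its components, a monitoring loop can report the CRC, length and missed counters separately instead of a single opaque total. A hedged example using the rte_eth_stats field names that appear in this hunk (print_rx_errors is a hypothetical helper):

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Dump the Rx error breakdown that the driver fills in above. */
static void
print_rx_errors(uint8_t port_id)
{
        struct rte_eth_stats st;

        rte_eth_stats_get(port_id, &st);
        printf("crc=%" PRIu64 " len=%" PRIu64 " missed=%" PRIu64
               " ierrors=%" PRIu64 "\n",
               st.ibadcrc, st.ibadlen, st.imissed, st.ierrors);
}
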
@@ -2285,6 +2298,55 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev)
        return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
 }
 
+static int
+ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+       struct ixgbe_hw *hw;
+       uint32_t mflcn_reg;
+       uint32_t fccfg_reg;
+       int rx_pause;
+       int tx_pause;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       fc_conf->pause_time = hw->fc.pause_time;
+       fc_conf->high_water = hw->fc.high_water[0];
+       fc_conf->low_water = hw->fc.low_water[0];
+       fc_conf->send_xon = hw->fc.send_xon;
+       fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+       /*
+        * Return rx_pause status according to actual setting of
+        * MFLCN register.
+        */
+       mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+       if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
+               rx_pause = 1;
+       else
+               rx_pause = 0;
+
+       /*
+        * Return tx_pause status according to actual setting of
+        * FCCFG register.
+        */
+       fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+       if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
+               tx_pause = 1;
+       else
+               tx_pause = 0;
+
+       if (rx_pause && tx_pause)
+               fc_conf->mode = RTE_FC_FULL;
+       else if (rx_pause)
+               fc_conf->mode = RTE_FC_RX_PAUSE;
+       else if (tx_pause)
+               fc_conf->mode = RTE_FC_TX_PAUSE;
+       else
+               fc_conf->mode = RTE_FC_NONE;
+
+       return 0;
+}
+
 static int
 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 {
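
Because the setter in the next hunk starts rejecting a request whose autoneg flag disagrees with the adapter state, the natural application pattern is read-modify-write: fetch the current configuration through the new getter, change only the field of interest, and write it back. A hedged sketch (enable_link_pause is a hypothetical helper):

#include <rte_ethdev.h>

/* Turn on full Rx/Tx link pause while preserving the autoneg,
 * watermark and pause-time values reported by the PMD. */
static int
enable_link_pause(uint8_t port_id)
{
        struct rte_eth_fc_conf fc = { 0 };
        int ret;

        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc);
        if (ret != 0)
                return ret;     /* e.g. -ENOTSUP from the PMD */
        fc.mode = RTE_FC_FULL;
        return rte_eth_dev_flow_ctrl_set(port_id, &fc);
}
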
@@ -2303,6 +2365,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        PMD_INIT_FUNC_TRACE();
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
+               return -ENOTSUP;
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
        PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
 
@@ -2646,6 +2710,52 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
        ixgbe_clear_rar(hw, index);
 }
 
+static int
+ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       uint32_t hlreg0;
+       uint32_t maxfrs;
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev_info dev_info;
+       uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       ixgbe_dev_info_get(dev, &dev_info);
+
+       /* check that mtu is within the allowed range */
+       if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+               return -EINVAL;
+
+       /* refuse mtu that requires the support of scattered packets when this
+        * feature has not been enabled before. */
+       if (!dev->data->scattered_rx &&
+           (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+
+       /* switch to jumbo mode if needed */
+       if (frame_size > ETHER_MAX_LEN) {
+               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+       } else {
+               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+       maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+       maxfrs &= 0x0000FFFF;
+       maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+       IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+
+       return 0;
+}
+
 /*
  * Virtual Function operations
  */
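
From an application, the new accessor is a single call through the generic ethdev API. A minimal, hedged sketch (enable_jumbo is a hypothetical helper; 9000 is only an illustrative value, and ixgbe_dev_mtu_set() above rejects any frame size beyond the reported max_rx_pktlen or, without scattered Rx, beyond the mbuf data room):

#include <stdio.h>
#include <stdint.h>
#include <rte_ethdev.h>

static int
enable_jumbo(uint8_t port_id)
{
        /* Switches the port into jumbo mode and updates MAXFRS. */
        int ret = rte_eth_dev_set_mtu(port_id, 9000);

        if (ret != 0)
                printf("rte_eth_dev_set_mtu failed: %d\n", ret);
        return ret;
}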