static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
- ether_addr* mac_addr,uint8_t on);
-static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
+ ether_addr *mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask,uint8_t vlan_on);
+ uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
last = latest; \
}
-#define IXGBE_SET_HWSTRIP(h, q) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_SET_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] |= 1 << bit;\
} while (0)
-#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] &= ~(1 << bit);\
} while (0)
-#define IXGBE_GET_HWSTRIP(h, q, r) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_GET_HWSTRIP(h, q, r) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
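These three macros maintain a software bitmap recording which RX queues have hardware VLAN stripping enabled: with 32-bit words and NBBY == 8, queue q maps to word q / 32, bit q % 32. A minimal standalone sketch of the same arithmetic, using a hypothetical 128-queue bitmap instead of the driver's ixgbe_hwstrip structure:

#include <stdint.h>
#include <stdio.h>

#define NBBY		8	/* bits per byte, as the driver assumes */
#define MAX_QUEUES	128	/* hypothetical queue count */

int main(void)
{
	uint32_t bitmap[MAX_QUEUES / 32] = { 0 };
	uint32_t q = 37;	/* example queue */
	uint32_t idx = q / (sizeof(bitmap[0]) * NBBY);	/* 37 / 32 = 1 */
	uint32_t bit = q % (sizeof(bitmap[0]) * NBBY);	/* 37 % 32 = 5 */

	bitmap[idx] |= 1 << bit;	/* IXGBE_SET_HWSTRIP */
	printf("word %u bit %u -> 0x%08x\n", idx, bit, bitmap[idx]);
	bitmap[idx] &= ~(1 << bit);	/* IXGBE_CLEAR_HWSTRIP */
	printf("after clear -> 0x%08x\n", bitmap[idx]);
	return 0;
}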
PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
stat_mappings->rqsmr[n], n);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
- }
- else {
+ } else {
PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
stat_mappings->tqsm[n], n);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
}
static void
-ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
+ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
struct ixgbe_stat_mapping_registers *stat_mappings =
IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
}
static void
-ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
uint8_t i;
struct ixgbe_dcb_tc_config *tc;
tc = &dcb_config->tc_config[0];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
- for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
}
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
* has already done this work. Only check we don't need a different
* RX and TX function.
*/
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
- * Tx queue may not initialized by primary process */
+ * Tx queue may not be initialized by primary process
+ */
if (eth_dev->data->tx_queues) {
txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
ixgbe_set_tx_function(eth_dev, txq);
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
- "Using default TX function.");
+ "Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
- ixgbe_dcb_init(hw,dcb_config);
+ ixgbe_dcb_init(hw, dcb_config);
/* Get Hardware Flow Control setting */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.current_mode = ixgbe_fc_full;
if (diag == IXGBE_ERR_EEPROM_VERSION) {
PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
- "LOM. Please be aware there may be issues associated "
- "with your hardware.");
+ "LOM. Please be aware there may be issues associated "
+ "with your hardware.");
PMD_INIT_LOG(ERR, "If you are experiencing problems "
- "please contact your Intel or hardware representative "
- "who provided you with this hardware.");
+ "please contact your Intel or hardware representative "
+ "who provided you with this hardware.");
} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
if (diag) {
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- hw->mac.num_rar_entries, 0);
+ hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %u bytes needed to store "
- "MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* Copy the permanent MAC address */
/* Allocate memory for storing hash filter MAC addresses */
eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- IXGBE_VMDQ_NUM_UC_MAC, 0);
+ IXGBE_VMDQ_NUM_UC_MAC, 0);
if (eth_dev->data->hash_mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
return -ENOMEM;
}
(int) hw->mac.type, (int) hw->phy.type);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
- eth_dev->data->port_id, pci_dev->id.vendor_id,
- pci_dev->id.device_id);
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
rte_intr_callback_register(&pci_dev->intr_handle,
ixgbe_dev_interrupt_handler,
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
return 0;
}
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
- * RX function */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
* Tx queue may not initialized by primary process
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE,
- "No TX queues configured yet. Using default TX function.");
+ "No TX queues configured yet. Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
- hw->mac.num_rar_entries, 0);
+ hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %u bytes needed to store "
- "MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* reset the hardware with the new settings */
diag = hw->mac.ops.start_hw(hw);
switch (diag) {
- case 0:
- break;
+ case 0:
+ break;
- default:
- PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
- return -EIO;
+ default:
+ PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+ return -EIO;
}
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vfta;
uint32_t vid_idx;
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vlnctrl;
uint16_t i;
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
- else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
- ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- }
+
+ /* Other 10G NIC, the VLAN strip can be set up per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}
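The rework above drops an else block that followed an unconditional return: on 82598EB the function bails out early, so the per-queue RXDCTL path can run as straight-line code. A compilable sketch of the pattern, with hypothetical stand-ins for the driver's logging and register writes:

#include <stdbool.h>
#include <stdio.h>

static void vlan_strip_disable(bool is_82598eb)
{
	if (is_82598eb) {
		puts("82598EB: no per-queue strip support, bail out");
		return;
	}

	/* No else needed: the unsupported case already returned. */
	puts("other 10G NIC: clear IXGBE_RXDCTL_VME for this queue");
}

int main(void)
{
	vlan_strip_disable(true);
	vlan_strip_disable(false);
	return 0;
}

The same flattening is applied to the strip-enable counterpart in the next hunk.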
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
- else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- }
+
+ /* Other 10G NIC, the VLAN strip can be set up per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl &= ~IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- }
- else {
+ } else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- }
- else {
+ } else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
ixgbe_stop_adapter(hw);
/* reinitialize adapter
- * this calls reset and start */
+ * this calls reset and start
+ */
status = ixgbe_pf_reset_hw(hw);
if (status != 0)
return -1;
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
ixgbe_vlan_offload_set(dev, mask);
hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
for (i = 0; i < 8; i++) {
- uint32_t mp;
- mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+ uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
/* global total per queue */
hw_stats->mpc[i] += mp;
/* Running comprehensive total for stats display */
/* Rx Errors */
stats->imissed = total_missed_rx;
stats->ierrors = hw_stats->crcerrs +
- hw_stats->mspdc +
- hw_stats->rlec +
- hw_stats->ruc +
- hw_stats->roc +
- hw_stats->illerrc +
- hw_stats->errbc +
- hw_stats->rfc +
- hw_stats->fccrc +
- hw_stats->fclast;
+ hw_stats->mspdc +
+ hw_stats->rlec +
+ hw_stats->ruc +
+ hw_stats->roc +
+ hw_stats->illerrc +
+ hw_stats->errbc +
+ hw_stats->rfc +
+ hw_stats->fccrc +
+ hw_stats->fclast;
/* Tx Errors */
stats->oerrors = 0;
ixgbevf_update_stats(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Good Rx packet, include VF loopback */
static void
ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Sync HW register to the last stats */
if (intr_enable_delay) {
if (rte_eal_alarm_set(timeout * 1000,
- ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+ ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
PMD_DRV_LOG(ERR, "Error setting alarm");
} else {
PMD_DRV_LOG(DEBUG, "enable intr immediately");
* Enable flow control according to the current settings.
*/
static int
-ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
{
int ret_val = 0;
uint32_t mflcn_reg, fccfg_reg;
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
- for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_rx_pause:
/*
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
- for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_tx_pause:
/*
* Tx Flow control is enabled, and Rx Flow control is
* disabled by software override.
*/
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_full:
/* Flow control (both Rx and Tx) is enabled by SW override. */
PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
ret_val = IXGBE_ERR_CONFIG;
goto out;
- break;
}
/* Set 802.3x based flow control settings. */
}
static int
-ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
if (hw->mac.type != ixgbe_mac_82598EB) {
- ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
+ ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
}
return ret_val;
}
uint8_t tc_num;
uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_dcb_config *dcb_config =
- IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
ixgbe_fc_none,
hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
- err = ixgbe_dcb_pfc_enable(dev,tc_num);
+ err = ixgbe_dcb_pfc_enable(dev, tc_num);
/* Not negotiated is not an error case */
if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
+ * feature has not been enabled before.
+ */
if (!dev->data->scattered_rx &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
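The guard above refuses an MTU whose worst-case frame (payload plus two VLAN tags, to cover QinQ) would overflow the usable room of a single mbuf unless scattered RX is already enabled. A worked example, assuming hypothetical sizes: a 2176-byte mbuf data room (a common DPDK default) and a 3000-byte jumbo frame:

#include <stdio.h>

#define IXGBE_VLAN_TAG_SIZE	4
#define RTE_PKTMBUF_HEADROOM	128	/* DPDK's default headroom */

int main(void)
{
	unsigned int min_rx_buf_size = 2176;	/* hypothetical mbuf data room */
	unsigned int frame_size = 3000;		/* hypothetical jumbo frame */

	/* here 3008 > 2048, so the scattered-RX branch fires */
	if (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
	    min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		puts("frame cannot fit in one mbuf: scattered RX required");
	else
		puts("single-mbuf RX is sufficient");
	return 0;
}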
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev)
{
- struct rte_eth_conf* conf = &dev->data->dev_conf;
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
}
/* Set vfta */
- ixgbevf_set_vfta_all(dev,1);
+ ixgbevf_set_vfta_all(dev, 1);
/* Set HW strip */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
ixgbevf_vlan_offload_set(dev, mask);
* Clear what we set, but we still keep shadow_vfta to
* restore after device starts
*/
- ixgbevf_set_vfta_all(dev,0);
+ ixgbevf_set_vfta_all(dev, 0);
/* Clear stored conf */
dev->data->scattered_rx = 0;
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
int i = 0, j = 0, vfta = 0, mask = 1;
- for (i = 0; i < IXGBE_VFTA_SIZE; i++){
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
vfta = shadow_vfta->vfta[i];
if (vfta) {
mask = 1;
- for (j = 0; j < 32; j++){
+ for (j = 0; j < 32; j++) {
if (vfta & mask)
ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
- mask<<=1;
+ mask <<= 1;
}
}
}
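ixgbevf_set_vfta_all() walks the shadow VFTA word by word and re-applies each set bit via ixgbe_set_vfta(); the VLAN id is rebuilt as (i << 5) + j because every 32-bit word covers 32 consecutive ids. A self-contained sketch of that recovery, assuming the driver's 128-word (4096-id) table and made-up contents:

#include <stdint.h>
#include <stdio.h>

#define VFTA_SIZE 128	/* 128 words x 32 bits = 4096 VLAN ids */

int main(void)
{
	uint32_t vfta[VFTA_SIZE] = { 0 };
	int i, j;

	vfta[0] = 1u << 5;	/* VLAN 5 */
	vfta[3] = 1u << 1;	/* VLAN (3 << 5) + 1 = 97 */

	for (i = 0; i < VFTA_SIZE; i++) {
		uint32_t word = vfta[i];

		for (j = 0; word != 0; j++, word >>= 1)
			if (word & 1)
				printf("restore VLAN %d\n", (i << 5) + j);
	}
	return 0;
}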
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vid_idx = 0;
uint32_t vid_bit = 0;
ctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
+ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}
static void
on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
for (i = 0; i < hw->mac.max_rx_queues; i++)
- ixgbevf_vlan_strip_queue_set(dev,i,on);
+ ixgbevf_vlan_strip_queue_set(dev, i, on);
}
}
}
static uint32_t
-ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
{
uint32_t vector = 0;
+
switch (hw->mac.mc_filter_type) {
case 0: /* use bits [47:36] of the address */
vector = ((uc_addr->addr_bytes[4] >> 4) |
}
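ixgbe_uta_vector() hashes a MAC address down to 12 bits; for mc_filter_type 0 the hash is bits [47:36] of the address, i.e. the high nibble of byte 4 joined with byte 5. The following hunk then splits that vector into a word index and a bit position for the unicast table array. A worked example for filter type 0, assuming 32-bit UTA words and a made-up address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical MAC address 00:1b:21:aa:bc:de */
	uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbc, 0xde };

	/* mc_filter_type 0: bits [47:36] of the address */
	uint32_t vector = ((addr[4] >> 4) | ((uint16_t)addr[5] << 4)) & 0xfff;
	uint32_t uta_idx = vector >> 5;		/* which 32-bit UTA word */
	uint32_t uta_shift = vector & 0x1f;	/* which bit inside it   */

	printf("vector 0x%03x -> word %u, bit %u\n", vector, uta_idx, uta_shift);
	return 0;
}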
static int
-ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
- uint8_t on)
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint8_t on)
{
uint32_t vector;
uint32_t uta_idx;
if (hw->mac.type < ixgbe_mac_82599EB)
return -ENOTSUP;
- vector = ixgbe_uta_vector(hw,mac_addr);
+ vector = ixgbe_uta_vector(hw, mac_addr);
uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
uta_shift = vector & ixgbe_uta_bit_mask;
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
else
- IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
return 0;
}
static int
ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
- uint32_t reg,addr;
+ uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
else
reg &= ~val;
- IXGBE_WRITE_REG(hw, addr,reg);
+ IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
static int
ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
- uint32_t reg,addr;
+ uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
else
reg &= ~val;
- IXGBE_WRITE_REG(hw, addr,reg);
+ IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
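ixgbe_set_pool_rx() and ixgbe_set_pool_tx() share the same read-modify-write shape: read the per-pool enable register, set or clear one pool's bit, write the value back. A minimal sketch of that shape, with a plain variable standing in for the IXGBE register and a simplified bit computation:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for IXGBE_READ_REG/IXGBE_WRITE_REG */

static void set_pool_bit(uint16_t pool, uint8_t on)
{
	uint32_t reg = fake_reg;		/* read */
	uint32_t val = 1u << (pool & 0x1f);	/* this pool's bit */

	if (on)
		reg |= val;			/* enable */
	else
		reg &= ~val;			/* disable */
	fake_reg = reg;				/* write back */
}

int main(void)
{
	set_pool_bit(3, 1);
	printf("0x%08x\n", fake_reg);	/* 0x00000008 */
	set_pool_bit(3, 0);
	printf("0x%08x\n", fake_reg);	/* 0x00000000 */
	return 0;
}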
return -ENOTSUP;
for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
- ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
+ ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
if (ret < 0)
return ret;
}
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
- uint32_t mr_ctl,vlvf;
+ uint32_t mr_ctl, vlvf;
uint32_t mp_lsb = 0;
uint32_t mv_msb = 0;
uint32_t mv_lsb = 0;
const uint8_t vlan_mask_offset = 32;
const uint8_t dst_pool_offset = 8;
const uint8_t rule_mr_offset = 4;
- const uint8_t mirror_rule_mask= 0x0F;
+ const uint8_t mirror_rule_mask = 0x0F;
struct ixgbe_mirror_info *mr_info =
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
mirror_type |= IXGBE_MRCTL_VLME;
/* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
- for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
+ for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
/* search vlan id related pool vlan filter index */
reg_index = ixgbe_find_vlvf_slot(hw,
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
+ * feature has not been enabled before.
+ */
if (!dev->data->scattered_rx &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))