uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] |= 1 << bit;\
- }while(0)
+ } while (0)
-#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
+#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] &= ~(1 << bit);\
- }while(0)
+ } while (0)
-#define IXGBE_GET_HWSTRIP(h, q, r) do{\
+#define IXGBE_GET_HWSTRIP(h, q, r) do {\
uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
(r) = (h)->bitmap[idx] >> bit & 1;\
- }while(0)
+ } while (0)
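For reference, these HWSTRIP macros implement a plain bitmap keyed by queue number: with NBBY bits per byte, queue q lands in word q / (sizeof(word) * NBBY) at bit q % (sizeof(word) * NBBY). A minimal standalone sketch of the same indexing (names here are illustrative, not the driver's):

#include <stdint.h>

#define BITS_PER_WORD (sizeof(uint32_t) * 8)  /* sizeof(bitmap[0]) * NBBY */

static inline void bitmap_set(uint32_t *bm, uint32_t q)
{
	bm[q / BITS_PER_WORD] |= 1u << (q % BITS_PER_WORD);
}

static inline void bitmap_clear(uint32_t *bm, uint32_t q)
{
	bm[q / BITS_PER_WORD] &= ~(1u << (q % BITS_PER_WORD));
}

static inline int bitmap_get(const uint32_t *bm, uint32_t q)
{
	return (bm[q / BITS_PER_WORD] >> (q % BITS_PER_WORD)) & 1;
}

The do { ... } while (0) wrapper is what lets each multi-statement macro behave as a single statement after an unbraced if or else, which is also why the closing `} while (0)` lines are worth normalizing here.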
/*
* The set of PCI devices this driver supports
{
uint32_t i;
- for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
+ for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
}
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
- if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
return;
if (on)
static void
ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
- if(mask & ETH_VLAN_STRIP_MASK){
+ if (mask & ETH_VLAN_STRIP_MASK) {
if (dev->data->dev_conf.rxmode.hw_vlan_strip)
ixgbe_vlan_hw_strip_enable_all(dev);
else
ixgbe_vlan_hw_strip_disable_all(dev);
}
- if(mask & ETH_VLAN_FILTER_MASK){
+ if (mask & ETH_VLAN_FILTER_MASK) {
if (dev->data->dev_conf.rxmode.hw_vlan_filter)
ixgbe_vlan_hw_filter_enable(dev);
else
ixgbe_vlan_hw_filter_disable(dev);
}
- if(mask & ETH_VLAN_EXTEND_MASK){
+ if (mask & ETH_VLAN_EXTEND_MASK) {
if (dev->data->dev_conf.rxmode.hw_vlan_extend)
ixgbe_vlan_hw_extend_enable(dev);
else
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
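The ixgbe_vlan_offload_set() hunk above follows a common dispatch pattern: each capability bit in the mask is tested independently, and the corresponding offload is enabled or disabled from the current rxmode configuration. A hedged sketch of the pattern with stand-in names (the mask values below are illustrative, not the ETH_VLAN_*_MASK constants):

#include <stdint.h>
#include <stdio.h>

#define STRIP_MASK  0x1u
#define FILTER_MASK 0x2u
#define EXTEND_MASK 0x4u

static void apply_vlan_offloads(uint32_t mask, int strip, int filter, int extend)
{
	if (mask & STRIP_MASK)
		printf("vlan strip %s\n", strip ? "enabled" : "disabled");
	if (mask & FILTER_MASK)
		printf("vlan filter %s\n", filter ? "enabled" : "disabled");
	if (mask & EXTEND_MASK)
		printf("vlan extend %s\n", extend ? "enabled" : "disabled");
}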
/* Low water mark of zero causes XOFF floods */
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
/* High/Low water can not be 0 */
- if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
+ if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
PMD_INIT_LOG(ERR, "Invalid water mark configuration");
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
- if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
+ if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
PMD_INIT_LOG(ERR, "Invalid water mark configuration");
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
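The water-mark checks above guard the invariant spelled out in the comments: both thresholds must be nonzero and the low mark must stay below the high mark, since a low water mark of zero lets the device flood XOFF pause frames. The same invariant as a standalone predicate (illustrative names):

#include <stdint.h>

static int fc_watermarks_valid(uint32_t high_water, uint32_t low_water)
{
	/* reject zero thresholds and low >= high, per the checks above */
	return high_water != 0 && low_water != 0 && low_water < high_water;
}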
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
- if(hw->mac.type != ixgbe_mac_82598EB) {
+ if (hw->mac.type != ixgbe_mac_82598EB) {
- ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
+ ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
}
return ret_val;
- for (i = 0; i < IXGBE_VFTA_SIZE; i++){
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
vfta = shadow_vfta->vfta[i];
- if(vfta){
+ if (vfta) {
mask = 1;
- for (j = 0; j < 32; j++){
+ for (j = 0; j < 32; j++) {
- if(vfta & mask)
+ if (vfta & mask)
- ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
- mask<<=1;
+ ixgbe_set_vfta(hw, (i << 5) + j, 0, on);
+ mask <<= 1;
}
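The restore loop above replays the shadow VFTA into hardware: entry i of the 32-bit array covers VLAN IDs (i << 5) through (i << 5) + 31, so each set bit j maps back to VLAN ID (i << 5) + j. The inverse mapping, as a sketch:

#include <stdint.h>

/* Locate a VLAN ID inside a VFTA-style table: 4096 IDs spread over
 * 32-bit words, so word = id / 32 and bit = id % 32. */
static inline void vfta_locate(uint16_t vlan_id, uint32_t *word, uint32_t *bit)
{
	*word = vlan_id >> 5;
	*bit = vlan_id & 0x1F;
}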
/* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
- if(ret){
+ if (ret) {
PMD_INIT_LOG(ERR, "Unable to set VF vlan");
return ret;
}
PMD_INIT_FUNC_TRACE();
- if(queue >= hw->mac.max_rx_queues)
+ if (queue >= hw->mac.max_rx_queues)
return;
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
- if(on)
+ if (on)
ctrl |= IXGBE_RXDCTL_VME;
else
ctrl &= ~IXGBE_RXDCTL_VME;
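The RXDCTL update above is the usual read-modify-write toggle of a single flag, keyed on the per-queue "on" argument. As a generic sketch:

#include <stdint.h>

/* Set or clear one flag in a register image, matching the on/off
 * branches above. */
static inline uint32_t update_flag(uint32_t reg, uint32_t flag, int on)
{
	return on ? (reg | flag) : (reg & ~flag);
}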
int on = 0;
-/* VF function only support hw strip feature, others are not support */
+/* VF function only supports hw strip feature, others are not supported */
- if(mask & ETH_VLAN_STRIP_MASK){
+ if (mask & ETH_VLAN_STRIP_MASK) {
on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
- for(i=0; i < hw->mac.max_rx_queues; i++)
+ for (i = 0; i < hw->mac.max_rx_queues; i++)
- ixgbevf_vlan_strip_queue_set(dev,i,on);
+ ixgbevf_vlan_strip_queue_set(dev, i, on);
}
}
uta_shift = vector & ixgbe_uta_bit_mask;
rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
- if(rc == on)
+ if (rc == on)
return 0;
reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
if (hw->mac.type < ixgbe_mac_82599EB)
return -ENOTSUP;
- if(on) {
+ if (on) {
for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
uta_info->uta_shadow[i] = ~0;
IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
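The UTA logic above mirrors the VFTA scheme: the hash vector selects a word (uta_idx) and a bit (uta_shift), and the uta_shadow copy lets the driver return early when the bit already holds the requested value, skipping the MMIO read-modify-write. A sketch of the shadow check, assuming a 5-bit shift and a 128-entry array (the exact mask constants are illustrative):

#include <stdint.h>

static inline int uta_bit_matches(const uint32_t *shadow, uint32_t vector, int on)
{
	uint32_t idx = (vector >> 5) & 0x7F; /* word within the 128-entry array */
	uint32_t shift = vector & 0x1F;      /* bit within the word */

	return (int)((shadow[idx] >> shift) & 1) == !!on;
}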
/* search vlan id related pool vlan filter index */
reg_index = ixgbe_find_vlvf_slot(hw,
mirror_conf->vlan.vlan_id[i]);
- if(reg_index < 0)
+ if (reg_index < 0)
return -EINVAL;
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
if ((vlvf & IXGBE_VLVF_VIEN) &&
mr_info->mr_conf[rule_id].vlan.vlan_mask =
mirror_conf->vlan.vlan_mask;
- for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
- if(mirror_conf->vlan.vlan_mask & (1ULL << i))
+ for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i))
mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
mirror_conf->vlan.vlan_id[i];
}
mv_lsb = 0;
mv_msb = 0;
mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
- for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+ for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
}
}
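The mirror-rule hunks above walk a 64-bit vlan_mask where set bit i selects slot i of the per-rule vlan_id table, copying IDs in when the rule is created and zeroing them on reset. The mask walk in isolation (illustrative helper):

#include <stdint.h>

/* Count how many VLAN slots a 64-bit selection mask picks out. */
static unsigned int selected_vlan_slots(uint64_t vlan_mask)
{
	unsigned int i, n = 0;

	for (i = 0; i < 64; i++)
		if (vlan_mask & (1ULL << i))
			n++;
	return n;
}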
*/
#define rte_ixgbe_prefetch(p) rte_prefetch0(p)
#else
-#define rte_ixgbe_prefetch(p) do {} while(0)
+#define rte_ixgbe_prefetch(p) do {} while (0)
#endif
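The disabled variant of rte_ixgbe_prefetch() expands to do {} while (0) rather than to nothing, so every call site remains one well-formed statement, e.g. as the unbraced body of an if/else. A self-contained illustration with a stand-in macro:

#include <stdio.h>

#define my_prefetch(p) do {} while (0)  /* stand-in for the disabled variant */

int main(void)
{
	int hot = 1;

	if (hot)
		my_prefetch(&hot);  /* still parses as a single statement */
	else
		printf("cold path\n");
	return 0;
}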
/*********************************************************************
pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
break;
}
- for (i = 0 ; i < nb_tcs; i++) {
+ for (i = 0; i < nb_tcs; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
/* clear 10 bits. */
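In the hunk above, the per-TC size lives in a 10-bit field of RXPBSIZE starting at IXGBE_RXPBSIZE_SHIFT, hence the 0x3FF mask: the field is cleared before the per-TC share is written back. The field update in isolation, with an assumed shift of 10:

#include <stdint.h>

#define PBSIZE_SHIFT 10                       /* assumed value of the shift */
#define PBSIZE_MASK (0x3FFu << PBSIZE_SHIFT)

static inline uint32_t set_pbsize_field(uint32_t reg, uint16_t share)
{
	reg &= ~PBSIZE_MASK;                          /* clear the 10-bit field */
	reg |= ((uint32_t)share << PBSIZE_SHIFT) & PBSIZE_MASK;
	return reg;
}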
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
/* VFTA - enable all vlan filters */
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
/* VFTA - enable all vlan filters */
nb_tcs = dcb_config->num_tcs.pfc_tcs;
/* Unpack map */
ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
- if(nb_tcs == ETH_4_TCS) {
+ if (nb_tcs == ETH_4_TCS) {
/* Avoid un-configured priority mapping to TC0 */
uint8_t j = 4;
uint8_t mask = 0xFF;
break;
}
- if(config_dcb_rx) {
+ if (config_dcb_rx) {
/* Set RX buffer size */
pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
- for (i = 0 ; i < nb_tcs; i++) {
+ for (i = 0; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
/* zero alloc all unused TCs */
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
}
}
- if(config_dcb_tx) {
+ if (config_dcb_tx) {
/* Only support an equally distributed Tx packet buffer strategy. */
uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
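Both the config_dcb_rx and config_dcb_tx branches above use the same equal-split strategy: the packet buffer is divided evenly among the active TCs and every unused TC slot is zeroed so no stale allocation remains. A sketch, assuming eight packet-buffer slots:

#include <stdint.h>

static void partition_buffer(uint32_t *pb_kb, unsigned int nb_tcs, uint32_t total_kb)
{
	unsigned int i;
	uint32_t share = total_kb / nb_tcs;

	for (i = 0; i < nb_tcs; i++)
		pb_kb[i] = share;     /* equal share per active TC */
	for (; i < 8; i++)
		pb_kb[i] = 0;         /* zero-alloc all unused TCs */
}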
- ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
IXGBE_DCB_RX_CONFIG);
- if(config_dcb_rx) {
+ if (config_dcb_rx) {
/* Unpack CEE standard containers */
ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_RX_CONFIG, refill);
ixgbe_dcb_unpack_max_cee(dcb_config, max);
- ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
+ ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
}
- if(config_dcb_tx) {
+ if (config_dcb_tx) {
/* Unpack CEE standard containers */
ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
ixgbe_dcb_unpack_max_cee(dcb_config, max);
ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
/* Check if the PFC is supported */
- if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+ if (dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
for (i = 0; i < nb_tcs; i++) {
/*
tc->pfc = ixgbe_dcb_pfc_enabled;
}
ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
- if(dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
+ if (dcb_config->num_tcs.pfc_tcs == ETH_4_TCS)
pfc_en &= 0x0F;
ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
}
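pfc_en above is a per-TC enable bitmap, so a 4-TC configuration keeps only the low nibble before handing it to ixgbe_dcb_config_pfc(). The trim generalized to any TC count (sketch):

#include <stdint.h>

static inline uint8_t trim_pfc_mask(uint8_t pfc_en, unsigned int nb_tcs)
{
	/* keep one enable bit per traffic class; 4 TCs -> 0x0F */
	return pfc_en & (uint8_t)((1u << nb_tcs) - 1);
}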
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
/* VFTA - enable all vlan filters */