{
int rc;
- if ((rc = ixgbe_init_hw(hw)) == 0 &&
- hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+ rc = ixgbe_init_hw(hw);
+ if (rc == 0 && hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
hw->mac.ops.setup_link =
&ixgbe_setup_mac_link_multispeed_fixed_fiber;
hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
hw->mac.ops.disable_tx_laser = NULL;
- hw->mac.ops.enable_tx_laser = NULL;
- hw->mac.ops.flap_tx_laser = NULL;
+ hw->mac.ops.enable_tx_laser = NULL;
+ hw->mac.ops.flap_tx_laser = NULL;
}
return rc;
BYPASS_CTL1_VALID_M |
BYPASS_CTL1_OFFTRST_M;
value = (sec & BYPASS_CTL1_TIME_M) |
- BYPASS_CTL1_VALID |
+ BYPASS_CTL1_VALID |
BYPASS_CTL1_OFFTRST;
FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);
ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
{
struct ixgbe_hw *hw;
- u32 status;
- u32 mask;
+ u32 status;
+ u32 mask;
s32 ret_val;
struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);
#ifdef RTE_NIC_BYPASS
struct ixgbe_bypass_mac_ops {
- s32 (*bypass_rw) (struct ixgbe_hw *hw, u32 cmd, u32 *status);
- bool (*bypass_valid_rd) (u32 in_reg, u32 out_reg);
- s32 (*bypass_set) (struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
- s32 (*bypass_rd_eep) (struct ixgbe_hw *hw, u32 addr, u8 *value);
+ s32 (*bypass_rw)(struct ixgbe_hw *hw, u32 cmd, u32 *status);
+ bool (*bypass_valid_rd)(u32 in_reg, u32 out_reg);
+ s32 (*bypass_set)(struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
+ s32 (*bypass_rd_eep)(struct ixgbe_hw *hw, u32 addr, u8 *value);
};
struct ixgbe_bypass_info {
#define BYPASS_LOG_EVENT_SHIFT 28
#define BYPASS_LOG_CLEAR_SHIFT 24 /* bit offset */
#define IXGBE_DEV_TO_ADPATER(dev) \
- ((struct ixgbe_adapter*)(dev->data->dev_private))
+ ((struct ixgbe_adapter *)(dev->data->dev_private))
/* extractions from ixgbe_phy.h */
#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
- ether_addr* mac_addr,uint8_t on);
-static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
+ ether_addr *mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask,uint8_t vlan_on);
+ uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
last = latest; \
}
-#define IXGBE_SET_HWSTRIP(h, q) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_SET_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] |= 1 << bit;\
} while (0)
-#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] &= ~(1 << bit);\
} while (0)
-#define IXGBE_GET_HWSTRIP(h, q, r) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_GET_HWSTRIP(h, q, r) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
stat_mappings->rqsmr[n], n);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
- }
- else {
+ } else {
PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
stat_mappings->tqsm[n], n);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
}
static void
-ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
+ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
struct ixgbe_stat_mapping_registers *stat_mappings =
IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
}
static void
-ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
uint8_t i;
struct ixgbe_dcb_tc_config *tc;
tc = &dcb_config->tc_config[0];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
- for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
}
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
* has already done this work. Only check we don't need a different
* RX and TX function.
*/
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
- * Tx queue may not initialized by primary process */
+ * Tx queue may not be initialized by primary process
+ */
if (eth_dev->data->tx_queues) {
txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
ixgbe_set_tx_function(eth_dev, txq);
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
- "Using default TX function.");
+ "Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
- ixgbe_dcb_init(hw,dcb_config);
+ ixgbe_dcb_init(hw, dcb_config);
/* Get Hardware Flow Control setting */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.current_mode = ixgbe_fc_full;
if (diag == IXGBE_ERR_EEPROM_VERSION) {
PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
- "LOM. Please be aware there may be issues associated "
- "with your hardware.");
+ "LOM. Please be aware there may be issues associated "
+ "with your hardware.");
PMD_INIT_LOG(ERR, "If you are experiencing problems "
- "please contact your Intel or hardware representative "
- "who provided you with this hardware.");
+ "please contact your Intel or hardware representative "
+ "who provided you with this hardware.");
} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
if (diag) {
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- hw->mac.num_rar_entries, 0);
+ hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %u bytes needed to store "
- "MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* Copy the permanent MAC address */
/* Allocate memory for storing hash filter MAC addresses */
eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- IXGBE_VMDQ_NUM_UC_MAC, 0);
+ IXGBE_VMDQ_NUM_UC_MAC, 0);
if (eth_dev->data->hash_mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
return -ENOMEM;
}
(int) hw->mac.type, (int) hw->phy.type);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
- eth_dev->data->port_id, pci_dev->id.vendor_id,
- pci_dev->id.device_id);
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
rte_intr_callback_register(&pci_dev->intr_handle,
ixgbe_dev_interrupt_handler,
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
return 0;
}
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
- * RX function */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
* Tx queue may not initialized by primary process
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE,
- "No TX queues configured yet. Using default TX function.");
+ "No TX queues configured yet. Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
- hw->mac.num_rar_entries, 0);
+ hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %u bytes needed to store "
- "MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* reset the hardware with the new settings */
diag = hw->mac.ops.start_hw(hw);
switch (diag) {
- case 0:
- break;
+ case 0:
+ break;
- default:
- PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
- return -EIO;
+ default:
+ PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+ return -EIO;
}
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vfta;
uint32_t vid_idx;
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vlnctrl;
uint16_t i;
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
- else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
- ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- }
+
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
- else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- }
+
+ /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl &= ~IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- }
- else {
+ } else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- }
- else {
+ } else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
ixgbe_stop_adapter(hw);
/* reinitialize adapter
- * this calls reset and start */
+ * this calls reset and start
+ */
status = ixgbe_pf_reset_hw(hw);
if (status != 0)
return -1;
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
ixgbe_vlan_offload_set(dev, mask);
hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
for (i = 0; i < 8; i++) {
- uint32_t mp;
- mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+ uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
/* global total per queue */
hw_stats->mpc[i] += mp;
/* Running comprehensive total for stats display */
/* Rx Errors */
stats->imissed = total_missed_rx;
stats->ierrors = hw_stats->crcerrs +
- hw_stats->mspdc +
- hw_stats->rlec +
- hw_stats->ruc +
- hw_stats->roc +
- hw_stats->illerrc +
- hw_stats->errbc +
- hw_stats->rfc +
- hw_stats->fccrc +
- hw_stats->fclast;
+ hw_stats->mspdc +
+ hw_stats->rlec +
+ hw_stats->ruc +
+ hw_stats->roc +
+ hw_stats->illerrc +
+ hw_stats->errbc +
+ hw_stats->rfc +
+ hw_stats->fccrc +
+ hw_stats->fclast;
/* Tx Errors */
stats->oerrors = 0;
ixgbevf_update_stats(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Good Rx packet, include VF loopback */
static void
ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Sync HW register to the last stats */
if (intr_enable_delay) {
if (rte_eal_alarm_set(timeout * 1000,
- ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+ ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
PMD_DRV_LOG(ERR, "Error setting alarm");
} else {
PMD_DRV_LOG(DEBUG, "enable intr immediately");
* Enable flow control according to the current settings.
*/
static int
-ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
{
int ret_val = 0;
uint32_t mflcn_reg, fccfg_reg;
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
- for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_rx_pause:
/*
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
- for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_tx_pause:
/*
* Tx Flow control is enabled, and Rx Flow control is
* disabled by software override.
*/
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_full:
/* Flow control (both Rx and Tx) is enabled by SW override. */
PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
ret_val = IXGBE_ERR_CONFIG;
goto out;
- break;
}
/* Set 802.3x based flow control settings. */
}
static int
-ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
if (hw->mac.type != ixgbe_mac_82598EB) {
- ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
+ ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
}
return ret_val;
}
uint8_t tc_num;
uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_dcb_config *dcb_config =
- IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
ixgbe_fc_none,
hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
- err = ixgbe_dcb_pfc_enable(dev,tc_num);
+ err = ixgbe_dcb_pfc_enable(dev, tc_num);
/* Not negotiated is not an error case */
if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
+ * feature has not been enabled before.
+ */
if (!dev->data->scattered_rx &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev)
{
- struct rte_eth_conf* conf = &dev->data->dev_conf;
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
}
/* Set vfta */
- ixgbevf_set_vfta_all(dev,1);
+ ixgbevf_set_vfta_all(dev, 1);
/* Set HW strip */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
ixgbevf_vlan_offload_set(dev, mask);
* Clear what we set, but we still keep shadow_vfta to
* restore after device starts
*/
- ixgbevf_set_vfta_all(dev,0);
+ ixgbevf_set_vfta_all(dev, 0);
/* Clear stored conf */
dev->data->scattered_rx = 0;
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
int i = 0, j = 0, vfta = 0, mask = 1;
- for (i = 0; i < IXGBE_VFTA_SIZE; i++){
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
vfta = shadow_vfta->vfta[i];
if (vfta) {
mask = 1;
- for (j = 0; j < 32; j++){
+ for (j = 0; j < 32; j++) {
if (vfta & mask)
ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
- mask<<=1;
+ mask <<= 1;
}
}
}
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vid_idx = 0;
uint32_t vid_bit = 0;
ctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
+ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}
static void
on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
for (i = 0; i < hw->mac.max_rx_queues; i++)
- ixgbevf_vlan_strip_queue_set(dev,i,on);
+ ixgbevf_vlan_strip_queue_set(dev, i, on);
}
}
}
static uint32_t
-ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
{
uint32_t vector = 0;
+
switch (hw->mac.mc_filter_type) {
case 0: /* use bits [47:36] of the address */
vector = ((uc_addr->addr_bytes[4] >> 4) |
}
static int
-ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
- uint8_t on)
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint8_t on)
{
uint32_t vector;
uint32_t uta_idx;
if (hw->mac.type < ixgbe_mac_82599EB)
return -ENOTSUP;
- vector = ixgbe_uta_vector(hw,mac_addr);
+ vector = ixgbe_uta_vector(hw, mac_addr);
uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
uta_shift = vector & ixgbe_uta_bit_mask;
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
else
- IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
return 0;
}
static int
ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
- uint32_t reg,addr;
+ uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
else
reg &= ~val;
- IXGBE_WRITE_REG(hw, addr,reg);
+ IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
static int
ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
- uint32_t reg,addr;
+ uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
else
reg &= ~val;
- IXGBE_WRITE_REG(hw, addr,reg);
+ IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
return -ENOTSUP;
for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
- ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
+ ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
if (ret < 0)
return ret;
}
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
- uint32_t mr_ctl,vlvf;
+ uint32_t mr_ctl, vlvf;
uint32_t mp_lsb = 0;
uint32_t mv_msb = 0;
uint32_t mv_lsb = 0;
const uint8_t vlan_mask_offset = 32;
const uint8_t dst_pool_offset = 8;
const uint8_t rule_mr_offset = 4;
- const uint8_t mirror_rule_mask= 0x0F;
+ const uint8_t mirror_rule_mask = 0x0F;
struct ixgbe_mirror_info *mr_info =
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
mirror_type |= IXGBE_MRCTL_VLME;
/* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
- for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
+ for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
/* search vlan id related pool vlan filter index */
reg_index = ixgbe_find_vlvf_slot(hw,
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
+ * feature has not been enabled before.
+ */
if (!dev->data->scattered_rx &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
msec_delay(1);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
- PMD_INIT_LOG(ERR, "Flow Director poll time exceeded "
- "during enabling!");
+ PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
return -ETIMEDOUT;
}
return 0;
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
uint32_t mask = hi_dword << 16;
+
mask |= lo_dword;
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
/* Process bits 0 and 16 */
- if (key & 0x0001) hash_result ^= lo_hash_dword;
- if (key & 0x00010000) hash_result ^= hi_hash_dword;
+ if (key & 0x0001)
+ hash_result ^= lo_hash_dword;
+ if (key & 0x00010000)
+ hash_result ^= hi_hash_dword;
/*
* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
/* process the remaining 30 bits in the key 2 bits at a time */
- for (i = 15; i; i-- ) {
- if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
- if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+ for (i = 15; i; i--) {
+ if (key & (0x0001 << i))
+ hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i))
+ hash_result ^= hi_hash_dword >> i;
}
return hash_result;
/* configure FDIRCMD register */
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
*/
static int
ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter,
- bool del,
- bool update)
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t fdircmd_flags;
bool is_perfect = FALSE;
int err;
struct ixgbe_hw_fdir_info *info =
- IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
if (fdir_mode == RTE_FDIR_MODE_NONE)
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) &&
(fdir_filter->input.flow_type ==
- RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
+ RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0)) {
PMD_DRV_LOG(ERR, "By this device,"
- " IPv4-other is not supported without"
- " L4 protocol and ports masked!");
+ " IPv4-other is not supported without"
+ " L4 protocol and ports masked!");
return -ENOTSUP;
}
if (is_perfect) {
if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
PMD_DRV_LOG(ERR, "IPv6 is not supported in"
- " perfect mode!");
+ " perfect mode!");
return -ENOTSUP;
}
fdirhash = atr_compute_perfect_hash_82599(&input,
- dev->data->dev_conf.fdir_conf.pballoc);
+ dev->data->dev_conf.fdir_conf.pballoc);
fdirhash |= fdir_filter->soft_id <<
- IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
} else
fdirhash = atr_compute_sig_hash_82599(&input,
- dev->data->dev_conf.fdir_conf.pballoc);
+ dev->data->dev_conf.fdir_conf.pballoc);
if (del) {
err = fdir_erase_filter_82599(hw, fdirhash);
fdircmd_flags |= IXGBE_FDIRCMD_DROP;
} else {
PMD_DRV_LOG(ERR, "Drop option is not supported in"
- " signature mode.");
+ " signature mode.");
return -EINVAL;
}
} else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
- fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
+ fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
queue = (uint8_t)fdir_filter->action.rx_queue;
else
return -EINVAL;
if (is_perfect) {
err = fdir_write_perfect_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash,
- fdir_mode);
+ fdircmd_flags, fdirhash,
+ fdir_mode);
} else {
err = fdir_add_signature_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash);
+ fdircmd_flags, fdirhash);
}
if (err < 0)
PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
- IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
uint32_t reg, max_num;
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
/* Get the information from registers */
reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
- IXGBE_FDIRFREE_COLL_SHIFT);
+ IXGBE_FDIRFREE_COLL_SHIFT);
info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
- IXGBE_FDIRFREE_FREE_SHIFT);
+ IXGBE_FDIRFREE_FREE_SHIFT);
reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
- IXGBE_FDIRLEN_MAXHASH_SHIFT);
+ IXGBE_FDIRLEN_MAXHASH_SHIFT);
info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
- IXGBE_FDIRLEN_MAXLEN_SHIFT);
+ IXGBE_FDIRLEN_MAXLEN_SHIFT);
reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
- (reg & FDIRCTRL_PBALLOC_MASK)));
+ (reg & FDIRCTRL_PBALLOC_MASK)));
if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
- fdir_stats->guarant_cnt = max_num - fdir_stats->free;
+ fdir_stats->guarant_cnt = max_num - fdir_stats->free;
else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;
struct ixgbe_vf_info **vfinfo =
IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
struct ixgbe_mirror_info *mirror_info =
- IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
struct ixgbe_uta_info *uta_info =
- IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
uint16_t vf_num;
PMD_INIT_FUNC_TRACE();
RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
- if (0 == (vf_num = dev_num_vf(eth_dev)))
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
return;
*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
if (*vfinfo == NULL)
rte_panic("Cannot allocate memory for private VF data\n");
- memset(mirror_info,0,sizeof(struct ixgbe_mirror_info));
- memset(uta_info,0,sizeof(struct ixgbe_uta_info));
+ memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
+ memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
if (vf_num >= ETH_32_POOLS) {
/* set mb interrupt mask */
ixgbe_mb_intr_setup(eth_dev);
-
- return;
}
void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
uint32_t vlanctrl;
int i;
- if (0 == (vf_num = dev_num_vf(eth_dev)))
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
return -1;
/* enable VMDq and set the default pool for PF */
}
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- /*
+ /*
* enable vlan filtering and allow all vlan tags through
*/
- vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
- /* VFTA - enable all vlan filters */
- for (i = 0; i < IXGBE_MAX_VFTA; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
- }
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < IXGBE_MAX_VFTA; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
/* Enable MAC Anti-Spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
- if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) {
+ if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
}
/* perform VF reset */
if (msgbuf[0] == IXGBE_VF_RESET) {
int ret = ixgbe_vf_reset(dev, vf, msgbuf);
+
vfinfo[vf].clear_to_send = true;
return ret;
}
nb_tx = 0;
while (nb_pkts) {
uint16_t ret, n;
+
n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
nb_tx = (uint16_t)(nb_tx + ret);
*/
static inline uint32_t
what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
- union ixgbe_tx_offload tx_offload)
+ union ixgbe_tx_offload tx_offload)
{
/* If match with the current used context */
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
- & tx_offload.data[0])) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
- & tx_offload.data[1])))) {
- return txq->ctx_curr;
- }
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
/* What if match with the next context */
txq->ctx_curr ^= 1;
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
- & tx_offload.data[0])) &&
- (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
- (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
- & tx_offload.data[1])))) {
- return txq->ctx_curr;
- }
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
+ & tx_offload.data[0])) &&
+ (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
+ (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
+ & tx_offload.data[1]))))
+ return txq->ctx_curr;
/* Mismatch, use the previous context */
return IXGBE_CTX_NUM;
tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
{
uint32_t tmp = 0;
+
if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
tmp |= IXGBE_ADVTXD_POPTS_TXSM;
if (ol_flags & PKT_TX_IP_CKSUM)
tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
{
uint32_t cmdtype = 0;
+
if (ol_flags & PKT_TX_VLAN_PKT)
cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
if (ol_flags & PKT_TX_TCP_SEG)
/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
status = txr[desc_to_clean_to].wb.status;
- if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
- {
+ if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
PMD_TX_FREE_LOG(DEBUG,
"TX descriptor %4u is not done"
"(port=%d queue=%d)",
* reference packets that are ready to be received.
*/
for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
- i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
- {
+ i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
/* Read desc statuses backwards to avoid race condition */
for (j = LOOK_AHEAD-1; j >= 0; --j)
s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
int i, j;
+
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
(unsigned) rxq->queue_id);
nb_rx = 0;
while (nb_pkts) {
uint16_t ret, n;
+
n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
nb_rx = (uint16_t)(nb_rx + ret);
rx_mbuf_alloc_failed++;
break;
}
- }
- else if (nb_hold > rxq->rx_free_thresh) {
+ } else if (nb_hold > rxq->rx_free_thresh) {
uint16_t next_rdt = rxq->rx_free_trigger;
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
prev = (uint16_t) (txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+
txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
txe[i].mbuf = NULL;
txe[i].last_id = i;
txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
txq->ctx_curr = 0;
- memset((void*)&txq->ctx_cache, 0,
+ memset((void *)&txq->ctx_cache, 0,
IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
if (rxq->rx_nb_avail) {
for (i = 0; i < rxq->rx_nb_avail; ++i) {
struct rte_mbuf *mb;
+
mb = rxq->rx_stage[rxq->rx_next_avail + i];
rte_pktmbuf_free_seg(mb);
}
/*
* Zero init all the descriptors in the ring.
*/
- memset (rz->addr, 0, RX_RING_SZ);
+ memset(rz->addr, 0, RX_RING_SZ);
/*
* Modified to setup VFRDT for Virtual Function
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
rxq->rdh_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
- }
- else {
+ } else {
rxq->rdt_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
rxq->rdh_reg_addr =
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
+
if (txq != NULL) {
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
ixgbe_reset_rx_queue(adapter, rxq);
}
for (i = 0; i < nb_tcs; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+
rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
/* clear 10 bits. */
rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
/* zero alloc all unused TCs */
for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
- rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
+
+ rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
/* clear 10 bits. */
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
/* MRQC: enable vmdq and dcb */
- mrqc = ((num_pools == ETH_16_POOLS) ? \
- IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
+ mrqc = (num_pools == ETH_16_POOLS) ?
+ IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
/* PFVTCTL: turn on virtualisation and set the default pool */
}
/* VFRE: pool enabling for receive - 16 or 32 */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*
/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
for (i = 0; i < cfg->nb_pool_maps; i++) {
/* set vlan id in VF register and set the valid bit */
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
(cfg->pool_map[i].vlan_id & 0xFFF)));
/*
* Put the allowed pools in VFB reg. As we only have 16 or 32
*/
static void
ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t q;
/* Enable DCB for Tx with 8 TCs */
if (dcb_config->num_tcs.pg_tcs == 8) {
reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
- }
- else {
+ } else {
reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
}
if (dcb_config->vt_mode)
- reg |= IXGBE_MTQC_VT_ENA;
+ reg |= IXGBE_MTQC_VT_ENA;
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
/* Disable drop for all queues */
for (q = 0; q < 128; q++)
IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
reg |= IXGBE_SECTX_DCB;
IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
}
- return;
}
/**
vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*Configure general DCB TX parameters*/
- ixgbe_dcb_tx_hw_config(hw,dcb_config);
- return;
+ ixgbe_dcb_tx_hw_config(hw, dcb_config);
}
static void
ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
- if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
+ if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
- }
- else {
+ } else {
dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
static void
ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
- if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
+ if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
- }
- else {
+ } else {
dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
}
- return;
}
static void
struct rte_eth_dcb_rx_conf *rx_conf =
&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
struct rte_eth_dcb_tx_conf *tx_conf =
&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
struct ixgbe_dcb_tc_config *tc;
- uint8_t i,j;
+ uint8_t i, j;
dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
*/
static void
ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t vlanctrl;
*/
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
-
- return;
}
static void
ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
- uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
+ uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
- ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
- ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
- ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
break;
default:
break;
struct ixgbe_dcb_config *dcb_config)
{
int ret = 0;
- uint8_t i,pfc_en,nb_tcs;
+ uint8_t i, pfc_en, nb_tcs;
uint16_t pbsize, rx_buffer_size;
uint8_t config_dcb_rx = 0;
uint8_t config_dcb_tx = 0;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- switch(dev->data->dev_conf.rxmode.mq_mode){
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
dcb_config->vt_mode = true;
if (hw->mac.type != ixgbe_mac_82598EB) {
case ETH_MQ_TX_VMDQ_DCB:
dcb_config->vt_mode = true;
config_dcb_tx = DCB_TX_CONFIG;
- /* get DCB and VT TX configuration parameters from rte_eth_conf */
- ixgbe_dcb_vt_tx_config(dev,dcb_config);
+ /* get DCB and VT TX configuration parameters
+ * from rte_eth_conf
+ */
+ ixgbe_dcb_vt_tx_config(dev, dcb_config);
/*Configure general VMDQ and DCB TX parameters*/
- ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
+ ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
break;
case ETH_MQ_TX_DCB:
/* Avoid un-configured priority mapping to TC0 */
uint8_t j = 4;
uint8_t mask = 0xFF;
+
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
- mask = (uint8_t)(mask & (~ (1 << map[i])));
+ mask = (uint8_t)(mask & (~(1 << map[i])));
for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
map[j++] = i;
/* Set RX buffer size */
pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
+
for (i = 0; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
}
}
if (config_dcb_tx) {
- /* Only support an equally distributed Tx packet buffer strategy. */
+ /* Only support an equally distributed
+ * Tx packet buffer strategy.
+ */
uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
+
for (i = 0; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
}
/*Calculates traffic class credits*/
- ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
IXGBE_DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
+ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
IXGBE_DCB_RX_CONFIG);
if (config_dcb_rx) {
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
/* Configure PG(ETS) RX */
- ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
+ ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
}
if (config_dcb_tx) {
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
/* Configure PG(ETS) TX */
- ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
+ ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
}
/*Configure queue statistics registers*/
* If the TC count is 8,and the default high_water is 48,
* the low_water is 16 as default.
*/
- hw->fc.high_water[i] = (pbsize * 3 ) / 4;
+ hw->fc.high_water[i] = (pbsize * 3) / 4;
hw->fc.low_water[i] = pbsize / 4;
/* Enable pfc for this TC */
tc = &dcb_config->tc_config[i];
/** Configure DCB hardware **/
ixgbe_dcb_hw_configure(dev, dcb_cfg);
-
- return;
}
/*
/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
for (i = 0; i < cfg->nb_pool_maps; i++) {
/* set vlan id in VF register and set the valid bit */
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
(cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
/*
* Put the allowed pools in VFB reg. As we only have 16 or 64
* i.e. bits 0-31
*/
if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
(cfg->pool_map[i].pools & UINT32_MAX));
else
- IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
- ((cfg->pool_map[i].pools >> 32) \
- & UINT32_MAX));
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
+ ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
}
/* Disable drop for all queues */
for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
IXGBE_WRITE_FLUSH(hw);
-
- return;
}
static int __attribute__((cold))
{
struct ixgbe_rx_entry *rxe = rxq->sw_ring;
uint64_t dma_addr;
- unsigned i;
+ unsigned int i;
/* Initialize software ring entries */
for (i = 0; i < rxq->nb_rx_desc; i++) {
volatile union ixgbe_adv_rx_desc *rxd;
struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
(unsigned) rxq->queue_id);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
rxq->rx_using_sse = rx_using_sse;
}
}
/* RFCTL configuration */
if (rsc_capable) {
uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+
if (rx_conf->enable_lro)
/*
* Since NFS packets coalescing is not supported - clear
if (hw->mac.type == ixgbe_mac_82599EB) {
/* Must setup the PSRTYPE register */
uint32_t psrtype;
+
psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Enable TX CRC (checksum offload requirement) and hw padding
- * (TSO requirement) */
+ * (TSO requirement)
+ */
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
* bookkeeping if things aren't delivered in order.
*/
switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- txctrl = IXGBE_READ_REG(hw,
- IXGBE_DCA_TXCTRL(txq->reg_idx));
- txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
- txctrl);
- break;
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw,
+ IXGBE_DCA_TXCTRL(txq->reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
+ txctrl);
+ break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_X550EM_a:
- default:
- txctrl = IXGBE_READ_REG(hw,
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ default:
+ txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
- txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
- txctrl);
- break;
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
+ txctrl);
+ break;
}
}
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -1;
- /* Wait until TX queue is empty */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_us(RTE_IXGBE_WAIT_100_US);
- txtdh = IXGBE_READ_REG(hw,
- IXGBE_TDH(txq->reg_idx));
- txtdt = IXGBE_READ_REG(hw,
- IXGBE_TDT(txq->reg_idx));
- } while (--poll_ms && (txtdh != txtdt));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
- "when stopping.", tx_queue_id);
- }
+ txq = dev->data->tx_queues[tx_queue_id];
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
- txdctl &= ~IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+ /* Wait until TX queue is empty */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
+ txtdh = IXGBE_READ_REG(hw,
+ IXGBE_TDH(txq->reg_idx));
+ txtdt = IXGBE_READ_REG(hw,
+ IXGBE_TDT(txq->reg_idx));
+ } while (--poll_ms && (txtdh != txtdt));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
+ "when stopping.", tx_queue_id);
+ }
- /* Wait until TX Enable ready */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- txdctl = IXGBE_READ_REG(hw,
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+ /* Wait until TX Enable ready */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
IXGBE_TXDCTL(txq->reg_idx));
- } while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable "
- "Tx Queue %d", tx_queue_id);
- }
+ } while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable "
+ "Tx Queue %d", tx_queue_id);
+ }
- if (txq->ops != NULL) {
- txq->ops->release_mbufs(txq);
- txq->ops->reset(txq);
- }
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- } else
- return -1;
+ if (txq->ops != NULL) {
+ txq->ops->release_mbufs(txq);
+ txq->ops->reset(txq);
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
/* pkt type + vlan olflags mask */
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
- PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
+ PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
+ PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
/* Just the act of getting into the function from the application is
- * going to cost about 7 cycles */
+ * going to cost about 7 cycles
+ */
rxdp = rxq->rx_ring + rxq->rx_tail;
_mm_prefetch((const void *)rxdp, _MM_HINT_T0);
/* See if we need to rearm the RX queue - gives the prefetch a bit
- * of time to act */
+ * of time to act
+ */
if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
ixgbe_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
- * there is actually a packet available */
+ * there is actually a packet available
+ */
if (!(rxdp->wb.upper.status_error &
rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
return 0;
);
/* Cache is empty -> need to scan the buffer rings, but first move
- * the next 'n' mbufs into the cache */
+ * the next 'n' mbufs into the cache
+ */
sw_ring = &rxq->sw_ring[rxq->rx_tail];
/* A. load 4 packet in one loop
/* the staterr values are not in order, as the count
* count of dd bits doesn't care. However, for end of
* packet tracking, we do care, so shuffle. This also
- * compresses the 32-bit values to 8-bit */
+ * compresses the 32-bit values to 8-bit
+ */
eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
static inline uint16_t
reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
- uint16_t nb_bufs, uint8_t *split_flags)
+ uint16_t nb_bufs, uint8_t *split_flags)
{
struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
struct rte_mbuf *start = rxq->pkt_first_seg;
struct rte_mbuf *end = rxq->pkt_last_seg;
- unsigned pkt_idx, buf_idx;
+ unsigned int pkt_idx, buf_idx;
for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
if (end != NULL) {
struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
int i;
+
for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
vtx1(txdp, *pkt, flags);
}
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
+
for (i = 0; i < (int)nb_pkts; ++i)
txep[i].mbuf = tx_pkts[i];
}
static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
- unsigned i;
+ unsigned int i;
struct ixgbe_tx_entry_v *txe;
const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
void __attribute__((cold))
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
- const unsigned mask = rxq->nb_rx_desc - 1;
- unsigned i;
+ const unsigned int mask = rxq->nb_rx_desc - 1;
+ unsigned int i;
if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
return;
static void __attribute__((cold))
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
- static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
+ static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
uint16_t i;
/* Initialize SW ring entries */
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+
txd->wb.status = IXGBE_TXD_STAT_DD;
txe[i].mbuf = NULL;
}