if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
return -ENOSYS;
- PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
+ PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
(int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
queue_id, stat_idx);
n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
- PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
+ PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
return -EIO;
}
offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
else
stat_mappings->rqsmr[n] |= qsmr_mask;
- PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
- "%s[%d] = 0x%08x\n",
+ PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
(int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
- queue_id, stat_idx, is_rx ? "RQSMR" : "TQSM", n,
+ queue_id, stat_idx);
+ PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
/* Now write the mapping in the appropriate register */
if (is_rx) {
- PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n",
+ PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
stat_mappings->rqsmr[n], n);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
}
else {
- PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n",
+ PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
stat_mappings->tqsm[n], n);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
}
if (diag == IXGBE_ERR_EEPROM_VERSION) {
PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
"LOM. Please be aware there may be issues associated "
- "with your hardware.\n If you are experiencing problems "
+ "with your hardware.");
+ PMD_INIT_LOG(ERR, "If you are experiencing problems "
"please contact your Intel or hardware representative "
- "who provided you with this hardware.\n");
+ "who provided you with this hardware.");
} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
- PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
+ PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
if (diag) {
PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
return -EIO;
IXGBE_WRITE_FLUSH(hw);
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
- PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d<n",
+ PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
(int) hw->mac.type, (int) hw->phy.type,
(int) hw->phy.sfp_type);
else
- PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
+ PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
(int) hw->mac.type, (int) hw->phy.type);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
return (-EIO);
}
- PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id, "ixgbe_mac_82599_vf");
/* IXGBE devices don't support half duplex */
if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
- PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
+ PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
dev->data->dev_conf.link_duplex,
dev->data->port_id);
return -EINVAL;
/* This can fail when allocating mbufs for descriptor rings */
err = ixgbe_dev_rx_init(dev);
if (err) {
- PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
goto error;
}
speed = IXGBE_LINK_SPEED_10GB_FULL;
break;
default:
- PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
+ PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
dev->data->dev_conf.link_speed,
dev->data->port_id);
goto error;
#ifdef RTE_NIC_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
/* Not supported in bypass mode */
- PMD_INIT_LOG(ERR, "\nSet link up is not supported "
- "by device id 0x%x\n", hw->device_id);
+ PMD_INIT_LOG(ERR, "Set link up is not supported "
+ "by device id 0x%x", hw->device_id);
return -ENOTSUP;
}
#endif
return 0;
}
- PMD_INIT_LOG(ERR, "\nSet link up is not supported by device id 0x%x\n",
+ PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
hw->device_id);
return -ENOTSUP;
}
#ifdef RTE_NIC_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
/* Not supported in bypass mode */
- PMD_INIT_LOG(ERR, "\nSet link down is not supported "
- "by device id 0x%x\n", hw->device_id);
+ PMD_INIT_LOG(ERR, "Set link down is not supported "
+ "by device id 0x%x", hw->device_id);
return -ENOTSUP;
}
#endif
return 0;
}
- PMD_INIT_LOG(ERR, "\nSet link down is not supported by device id 0x%x\n",
+ PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
hw->device_id);
return -ENOTSUP;
}
struct rte_eth_link link;
int intr_enable_delay = false;
- PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+ PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
if (intr->flags & IXGBE_FLAG_MAILBOX) {
ixgbe_pf_mbx_process(dev);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
}
- PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
+ PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
ixgbe_enable_intr(dev);
rte_intr_enable(&(dev->pci_dev->intr_handle));
}
if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
return -ENOTSUP;
rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
- PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
/*
* At least reserve one Ethernet frame for watermark
max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
- PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
- PMD_INIT_LOG(ERR, "High_water must <= 0x%x\n", max_high_water);
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+ PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
return (-EINVAL);
}
return 0;
}
- PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err);
+ PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
return -EIO;
}
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
/* High/Low water can not be 0 */
if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
- PMD_INIT_LOG(ERR, "Invalid water mark configuration\n");
+ PMD_INIT_LOG(ERR, "Invalid water mark configuration");
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
- PMD_INIT_LOG(ERR, "Invalid water mark configuration\n");
+ PMD_INIT_LOG(ERR, "Invalid water mark configuration");
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
tc_num = map[pfc_conf->priority];
rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
- PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+ PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
/*
* At least reserve one Ethernet frame for watermark
* high_water/low_water in kilo bytes for ixgbe
max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
if ((pfc_conf->fc.high_water > max_high_water) ||
(pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
- PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
- PMD_INIT_LOG(ERR, "High_water must <= 0x%x\n", max_high_water);
+ PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+ PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
return (-EINVAL);
}
if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
return 0;
- PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x \n", err);
+ PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
return -EIO;
}
{
struct rte_eth_conf* conf = &dev->data->dev_conf;
- PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
+ PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
/*
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
if (!conf->rxmode.hw_strip_crc) {
- PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
+ PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
conf->rxmode.hw_strip_crc = 1;
}
#else
if (conf->rxmode.hw_strip_crc) {
- PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
+ PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
conf->rxmode.hw_strip_crc = 0;
}
#endif
/* This can fail when allocating mbufs for descriptor rings */
err = ixgbevf_dev_rx_init(dev);
if (err) {
- PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n",
- err);
+ PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
ixgbe_dev_clear_queues(dev);
return err;
}
/* we only need to do this if VMDq is enabled */
reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
- PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n");
+ PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
return (-1);
}
if (hw->mac.type == ixgbe_mac_82598EB) {
PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
- " on 82599 hardware and newer\n");
+ " on 82599 hardware and newer");
return (-ENOTSUP);
}
if (ixgbe_vmdq_mode_check(hw) < 0)
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL)
- PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!\n");
+ PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!");
}
/*
fdirhashcmd |= fdirhash;
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
- PMD_INIT_LOG(DEBUG, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+ PMD_INIT_LOG(DEBUG, "Tx Queue=%x hash=%x", queue, (u32)fdirhashcmd);
}
/*
/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
fdirm |= IXGBE_FDIRM_L4P;
if (input_mask->dst_port_mask || input_mask->src_port_mask) {
- PMD_INIT_LOG(ERR, " Error on src/dst port mask\n");
+ PMD_INIT_LOG(ERR, " Error on src/dst port mask");
return -EINVAL;
}
}
tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
- " tx_first=%u tx_last=%u\n",
+ " tx_first=%u tx_last=%u",
(unsigned) txq->port_id,
(unsigned) txq->queue_id,
(unsigned) pkt_len,
if (ixgbe_rx_alloc_bufs(rxq) != 0) {
int i, j;
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
- "queue_id=%u\n", (unsigned) rxq->port_id,
+ "queue_id=%u", (unsigned) rxq->port_id,
(unsigned) rxq->queue_id);
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
* frames to its peer(s).
*/
PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
- "ext_err_stat=0x%08x pkt_len=%u\n",
+ "ext_err_stat=0x%08x pkt_len=%u",
(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
(unsigned) rx_id, (unsigned) staterr,
(unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
nmb = rte_rxmbuf_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
- "queue_id=%u\n", (unsigned) rxq->port_id,
+ "queue_id=%u", (unsigned) rxq->port_id,
(unsigned) rxq->queue_id);
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
break;
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
if (nb_hold > rxq->rx_free_thresh) {
PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
- "nb_hold=%u nb_rx=%u\n",
+ "nb_hold=%u nb_rx=%u",
(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
(unsigned) rx_id, (unsigned) nb_hold,
(unsigned) nb_rx);
* to happen by sending specific "back-pressure" flow control
* frames to its peer(s).
*/
- PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
- "staterr=0x%x data_len=%u\n",
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+ "staterr=0x%x data_len=%u",
(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
(unsigned) rx_id, (unsigned) staterr,
(unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
nmb = rte_rxmbuf_alloc(rxq->mb_pool);
if (nmb == NULL) {
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
- "queue_id=%u\n", (unsigned) rxq->port_id,
+ "queue_id=%u", (unsigned) rxq->port_id,
(unsigned) rxq->queue_id);
rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
break;
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
if (nb_hold > rxq->rx_free_thresh) {
PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
- "nb_hold=%u nb_rx=%u\n",
+ "nb_hold=%u nb_rx=%u",
(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
(unsigned) rx_id, (unsigned) nb_hold,
(unsigned) nb_rx);
ixgbe_tx_queue_release(txq);
return (-ENOMEM);
}
- PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
- PMD_INIT_LOG(INFO, "Using simple tx code path\n");
+ PMD_INIT_LOG(INFO, "Using simple tx code path");
#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
ixgbe_txq_vec_setup(txq) == 0) {
- PMD_INIT_LOG(INFO, "Vector tx enabled.\n");
+ PMD_INIT_LOG(INFO, "Vector tx enabled.");
dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
}
else
#endif
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
} else {
- PMD_INIT_LOG(INFO, "Using full-featured tx code path\n");
+ PMD_INIT_LOG(INFO, "Using full-featured tx code path");
PMD_INIT_LOG(INFO, " - txq_flags = %lx "
- "[IXGBE_SIMPLE_FLAGS=%lx]\n",
+ "[IXGBE_SIMPLE_FLAGS=%lx]",
(long unsigned)txq->txq_flags,
(long unsigned)IXGBE_SIMPLE_FLAGS);
PMD_INIT_LOG(INFO, " - tx_rs_thresh = %lu "
- "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]\n",
+ "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(long unsigned)txq->tx_rs_thresh,
(long unsigned)RTE_PMD_IXGBE_TX_MAX_BURST);
dev->tx_pkt_burst = ixgbe_xmit_pkts;
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
}
- PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
/*
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function will be "
- "used on port=%d, queue=%d.\n",
+ "used on port=%d, queue=%d.",
rxq->port_id, rxq->queue_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
#ifdef RTE_IXGBE_INC_VECTOR
if (!ixgbe_rx_vec_condition_check(dev)) {
PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
- "sure RX burst size no less than 32.\n");
+ "sure RX burst size no less than 32.");
ixgbe_rxq_vec_setup(rxq);
dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
}
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
"are not satisfied, Scattered Rx is requested, "
"or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
- "enabled (port=%d, queue=%d).\n",
+ "enabled (port=%d, queue=%d).",
rxq->port_id, rxq->queue_id);
}
dev->data->rx_queues[queue_idx] = rxq;
uint32_t desc = 0;
if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
+ PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
return 0;
}
ixgbe_dcb_rx_hw_config(hw, dcb_config);
break;
default:
- PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration\n");
+ PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
break;
}
switch (dev->data->dev_conf.txmode.mq_mode) {
ixgbe_dcb_tx_hw_config(hw, dcb_config);
break;
default:
- PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration\n");
+ PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
break;
}
volatile union ixgbe_adv_rx_desc *rxd;
struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
if (mbuf == NULL) {
- PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n",
+ PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
(unsigned) rxq->queue_id);
return (-ENOMEM);
}
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM) !=
IXGBE_SUCCESS) {
- PMD_INIT_LOG(ERR, "Could not enable loopback mode\n");
+ PMD_INIT_LOG(ERR, "Could not enable loopback mode");
/* ignore error */
return;
}
/* Allocate buffers for descriptor rings */
if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
- PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d\n",
+ PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
rx_queue_id);
return -1;
}
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d\n",
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
rx_queue_id);
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
} while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d\n",
+ PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
rx_queue_id);
rte_delay_us(RTE_IXGBE_WAIT_100_US);
} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not enable "
- "Tx Queue %d\n", tx_queue_id);
+ "Tx Queue %d", tx_queue_id);
}
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
} while (--poll_ms && (txtdh != txtdt));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
- "when stopping.\n", tx_queue_id);
+ "when stopping.", tx_queue_id);
}
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not disable "
- "Tx Queue %d\n", tx_queue_id);
+ "Tx Queue %d", tx_queue_id);
}
if (txq->ops != NULL) {
txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
} while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d\n", i);
+ PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
} while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d\n", i);
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);
rte_wmb();
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
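
The common thread in every hunk above: trailing "\n"s are dropped, and multi-line
messages are split into one PMD_*_LOG call per line, because the PMD logging
wrappers append the newline themselves. A minimal sketch of such a wrapper,
assuming the rte_log() API and the generic PMD logtype (the exact definitions
live in ixgbe_logs.h and may differ):

#define PMD_INIT_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
		"PMD: %s(): " fmt "\n", __func__, ##args)

With the newline baked into the wrapper, an embedded "\n" in fmt would print as a
stray blank line, which is also why the leading "\n"s (e.g. in
"\nport %d vendorID=0x%x ...") are removed as well.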