}
PMD_TX_LOG(DEBUG,
- "start bd: nbytes %d flags %x vlan %x\n",
+ "start bd: nbytes %d flags %x vlan %x",
tx_start_bd->nbytes,
tx_start_bd->bd_flags.as_bitfield,
tx_start_bd->vlan_or_ethertype);
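Every hunk in this patch follows the same pattern: the trailing \n is dropped from the format string because the per-driver PMD log wrappers already append a newline, so the old strings produced blank lines in the log. A minimal sketch of such a wrapper, assuming an RTE_LOG-based macro (the exact macro names and log types vary per driver, so treat this as illustrative rather than any one driver's definition):
/*
 * Illustrative PMD log macro: the wrapper itself appends "\n",
 * which is why format strings passed to it must not carry one.
 */
#define PMD_INIT_LOG(level, fmt, args...) \
	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
Note that the rte_panic() calls left untouched below keep their trailing \n; rte_panic() prints its format string as-is, so the newline there is still needed.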
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
(void *)dev);
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ " no intr multiplexn");
}
/* check if rxq interrupt is enabled */
if (dev->data->dev_conf.intr_conf.rxq != 0)
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
(void *)dev);
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ " no intr multiplex");
}
/* check if rxq interrupt is enabled */
dev->data->nb_rx_queues * sizeof(int), 0);
if (!intr_handle->intr_vec) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
if (reta_size != ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
if (reta_size != ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
(struct rte_eth_syn_filter *)arg);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -EINVAL;
break;
}
if (rc) {
PMD_INIT_LOG(ERR,
- "failed to restart queue %d type(%d)\n",
+ "failed to restart queue %d type(%d)",
i, ring_type);
return -1;
}
uint32_t max_frame_len = ena_get_mtu_conf(adapter);
if (max_frame_len > adapter->max_mtu) {
- PMD_INIT_LOG(ERR, "Unsupported MTU of %d\n", max_frame_len);
+ PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
return -1;
}
queue_size = rte_align32pow2(queue_size >> 1);
if (queue_size == 0) {
- PMD_INIT_LOG(ERR, "Invalid queue size\n");
+ PMD_INIT_LOG(ERR, "Invalid queue size");
return -EFAULT;
}
rc = ena_populate_rx_queue(ring, ring->ring_size);
if ((unsigned int)rc != ring->ring_size) {
- PMD_INIT_LOG(ERR, "Failed to populate rx ring !\n");
+ PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
return (-1);
}
pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
adapter->pdev = pci_dev;
- PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
+ PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
pci_dev->addr.domain,
pci_dev->addr.bus,
pci_dev->addr.devid,
else if (adapter->regs)
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
else
- PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
+ PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
ENA_REGS_BAR);
ena_dev->reg_bar = adapter->regs;
/* device specific initialization routine */
rc = ena_device_init(ena_dev, &get_feat_ctx);
if (rc) {
- PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
+ PMD_INIT_LOG(CRIT, "Failed to init ENA device");
return -1;
}
if (get_feat_ctx.max_queues.max_llq_num == 0) {
PMD_INIT_LOG(ERR,
"Trying to use LLQ but llq_num is 0.\n"
- "Fall back into regular queues.\n");
+ "Fall back into regular queues.");
ena_dev->tx_mem_queue_type =
ENA_ADMIN_PLACEMENT_POLICY_HOST;
adapter->num_queues =
if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
- PMD_INIT_LOG(ERR, "Illegal adapter state: %d\n",
+ PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
adapter->state);
return -1;
}
0);
if (!intr_handle->intr_vec) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %d rx_queues intr_vec\n",
+ "Failed to allocate %d rx_queues intr_vec",
dev->data->nb_rx_queues);
return -ENOMEM;
}
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO,
- "lsc won't enable because of no intr multiplex\n");
+ "lsc won't enable because of no intr multiplex");
} else if (dev->data->dev_conf.intr_conf.lsc != 0) {
ret = i40e_aq_set_phy_int_mask(hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
else {
ret = -EINVAL;
PMD_DRV_LOG(ERR,
- "Unsupported vlan type in single vlan.\n");
+ "Unsupported vlan type in single vlan.");
return ret;
}
break;
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
PMD_DRV_LOG(ERR,
- "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
reta_size, lut_size);
return -EINVAL;
}
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
PMD_DRV_LOG(ERR,
- "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
reta_size, lut_size);
return -EINVAL;
}
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret)
- PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n",
+ PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
hw->aq.asq_last_status);
}
else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
else {
- PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
- hw->func_caps.rss_table_size);
+ PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
+ hw->func_caps.rss_table_size);
return I40E_ERR_PARAM;
}
- PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u\n",
+ PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
hw->func_caps.rss_table_size);
pf->hash_lut_size = hw->func_caps.rss_table_size;
flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
+ PMD_DRV_LOG(ERR, "Invalid MAC match type");
ret = I40E_ERR_PARAM;
goto DONE;
}
flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
+ PMD_DRV_LOG(ERR, "Invalid MAC filter type");
ret = I40E_ERR_PARAM;
goto DONE;
}
if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
filter_type == RTE_MACVLAN_HASH_MATCH) {
if (vlan_num == 0) {
- PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
return I40E_ERR_PARAM;
}
} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
int ret = -EINVAL;
val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
- PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
+ PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
if (len == 3) {
reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
} else {
ret = 0;
}
- PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n",
+ PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
return ret;
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
- PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg);
+ PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
if (reg != val)
i40e_write_rx_ctl(hw, addr, val);
- PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr,
+ PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
(uint32_t)i40e_read_rx_ctl(hw, addr));
}
filter->queue, add, &stats, NULL);
PMD_DRV_LOG(INFO,
- "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u\n",
+ "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
ret, stats.mac_etype_used, stats.etype_used,
stats.mac_etype_free, stats.etype_free);
if (ret < 0)
FALSE);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -ENOSYS;
break;
}
old_cfg->etsrec = old_cfg->etscfg;
ret = i40e_set_dcb_config(hw);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Set DCB Config failed, err %s aq_err %s\n",
+ PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
i40e_stat_str(hw, ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
if (ret)
PMD_INIT_LOG(WARNING,
- "Failed configuring TC for VEB seid=%d\n",
+ "Failed configuring TC for VEB seid=%d",
main_vsi->veb->seid);
}
/* Update each VSI */
I40E_DEFAULT_TCMAP);
if (ret)
PMD_INIT_LOG(WARNING,
- "Failed configuring TC for VSI seid=%d\n",
+ "Failed configuring TC for VSI seid=%d",
vsi_list->vsi->seid);
/* continue */
}
/* mtu setting is forbidden if port is start */
if (dev_data->dev_started) {
- PMD_DRV_LOG(ERR,
- "port %d must be stopped before configuration\n",
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
dev_data->port_id);
return -EBUSY;
}
}
PMD_DRV_LOG(INFO, "Ethertype filter:"
" mac_etype_used = %u, etype_used = %u,"
- " mac_etype_free = %u, etype_free = %u\n",
+ " mac_etype_free = %u, etype_free = %u",
stats.mac_etype_used, stats.etype_used,
stats.mac_etype_free, stats.etype_free);
}
if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
filter_type == RTE_MACVLAN_HASH_MATCH) {
if (vlan_num == 0) {
- PMD_DRV_LOG(ERR,
- "VLAN number shouldn't be 0\n");
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
return I40E_ERR_PARAM;
}
} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES");
return ret;
}
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
return ret;
}
switch (pf_msg->event) {
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
break;
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
vf->link_up = pf_msg->event_data.link_event.link_status;
vf->link_speed = pf_msg->event_data.link_event.link_speed;
break;
case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
break;
default:
PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
"expect %u, get %u",
vf->pend_cmd, msg_opc);
PMD_DRV_LOG(DEBUG, "adminq response is received,"
- " opcode = %d\n", msg_opc);
+ " opcode = %d", msg_opc);
}
break;
default:
/* No interrupt event indicated */
if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
- PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do\n");
+ PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
goto done;
}
if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
- PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported\n");
+ PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
i40evf_handle_aq_msg(dev);
}
/* Link Status Change interrupt */
if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
- " do nothing\n");
+ " do nothing");
done:
i40evf_enable_irq0(hw);
dev->data->nb_rx_queues * sizeof(int), 0);
if (!intr_handle->intr_vec) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number of hardware can "
- "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
return -EINVAL;
}
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number of hardware can "
- "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
return -EINVAL;
}
if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
i40evf_disable_rss(vf);
- PMD_DRV_LOG(DEBUG, "RSS not configured\n");
+ PMD_DRV_LOG(DEBUG, "RSS not configured");
return 0;
}
rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
i40evf_disable_rss(vf);
- PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
+ PMD_DRV_LOG(DEBUG, "No hash flag is set");
return 0;
}
/* mtu setting is forbidden if port is start */
if (dev_data->dev_started) {
- PMD_DRV_LOG(ERR,
- "port %d must be stopped before configuration\n",
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
dev_data->port_id);
return -EBUSY;
}
I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
- PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d\n",
+ PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
guarant_cnt, best_cnt);
}
vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
vc_vqci->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong\n");
+ PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
ret = I40E_ERR_PARAM;
goto send_msg;
}
vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
vc_vqcei->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong\n");
+ PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
ret = I40E_ERR_PARAM;
goto send_msg;
}
*cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
break;
default:
- PMD_TX_LOG(ERR, "Tunnel type not supported\n");
+ PMD_TX_LOG(ERR, "Tunnel type not supported");
return;
}
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
ixgbe_dev_interrupt_handler, dev);
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ " no intr multiplex");
}
/* check if rxq interrupt is enabled */
if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, sp_reta_size);
+ "(%d)", reta_size, sp_reta_size);
return -EINVAL;
}
if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, sp_reta_size);
+ "(%d)", reta_size, sp_reta_size);
return -EINVAL;
}
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
(struct rte_eth_syn_filter *)arg);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -EINVAL;
break;
}
/* For informational purposes only */
if (i >= IXGBE_MAX_SECTX_POLL)
PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
- "path fully disabled. Continuing with init.\n");
+ "path fully disabled. Continuing with init.");
return IXGBE_SUCCESS;
}
if (new == 0)
break;
if (new & NFP_NET_CFG_UPDATE_ERR) {
- PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x\n", new);
+ PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
return -1;
}
if (cnt >= NFP_NET_POLL_TIMEOUT) {
PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
- " %dms\n", update, cnt);
+ " %dms", update, cnt);
rte_panic("Exiting\n");
}
nanosleep(&wait, 0); /* waiting for a 1ms */
* Reconfig errors imply situations where they can be handled.
* Otherwise, rte_panic is called inside __nfp_net_reconfig
*/
- PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x\n",
+ PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
ctrl, update);
return -EIO;
}
* called after that internal process
*/
- PMD_INIT_LOG(DEBUG, "Configure\n");
+ PMD_INIT_LOG(DEBUG, "Configure");
dev_conf = &dev->data->dev_conf;
rxmode = &dev_conf->rxmode;
/* Checking TX mode */
if (txmode->mq_mode) {
- PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported\n");
+ PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
return -EINVAL;
}
update = NFP_NET_CFG_UPDATE_RSS;
new_ctrl = NFP_NET_CFG_CTRL_RSS;
} else {
- PMD_INIT_LOG(INFO, "RSS not supported\n");
+ PMD_INIT_LOG(INFO, "RSS not supported");
return -EINVAL;
}
}
if (rxmode->split_hdr_size) {
- PMD_INIT_LOG(INFO, "rxmode does not support split header\n");
+ PMD_INIT_LOG(INFO, "rxmode does not support split header");
return -EINVAL;
}
if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
} else {
- PMD_INIT_LOG(INFO, "RXCSUM not supported\n");
+ PMD_INIT_LOG(INFO, "RXCSUM not supported");
return -EINVAL;
}
}
if (rxmode->hw_vlan_filter) {
- PMD_INIT_LOG(INFO, "VLAN filter not supported\n");
+ PMD_INIT_LOG(INFO, "VLAN filter not supported");
return -EINVAL;
}
if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
} else {
- PMD_INIT_LOG(INFO, "hw vlan strip not supported\n");
+ PMD_INIT_LOG(INFO, "hw vlan strip not supported");
return -EINVAL;
}
}
if (rxmode->hw_vlan_extend) {
- PMD_INIT_LOG(INFO, "VLAN extended not supported\n");
+ PMD_INIT_LOG(INFO, "VLAN extended not supported");
return -EINVAL;
}
/* this is handled in rte_eth_dev_configure */
if (rxmode->hw_strip_crc) {
- PMD_INIT_LOG(INFO, "strip CRC not supported\n");
+ PMD_INIT_LOG(INFO, "strip CRC not supported");
return -EINVAL;
}
if (rxmode->enable_scatter) {
- PMD_INIT_LOG(INFO, "Scatter not supported\n");
+ PMD_INIT_LOG(INFO, "Scatter not supported");
return -EINVAL;
}
dev->data->nb_rx_queues * sizeof(int), 0);
if (!intr_handle->intr_vec) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
- PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO\n");
+ PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
/* UIO just supports one queue and no LSC*/
nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
} else {
- PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO\n");
+ PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
for (i = 0; i < dev->data->nb_rx_queues; i++)
/*
* The first msix vector is reserved for non
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- PMD_INIT_LOG(DEBUG, "Start\n");
+ PMD_INIT_LOG(DEBUG, "Start");
/* Disabling queues just in case... */
nfp_net_disable_queues(dev);
{
int i;
- PMD_INIT_LOG(DEBUG, "Stop\n");
+ PMD_INIT_LOG(DEBUG, "Stop");
nfp_net_disable_queues(dev);
struct nfp_net_hw *hw;
struct rte_pci_device *pci_dev;
- PMD_INIT_LOG(DEBUG, "Close\n");
+ PMD_INIT_LOG(DEBUG, "Close");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = RTE_DEV_TO_PCI(dev->device);
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
- PMD_INIT_LOG(INFO, "Promiscuous mode not supported\n");
+ PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
return;
}
rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
if (rxq == NULL) {
- PMD_INIT_LOG(ERR, "Bad queue: %u\n", queue_idx);
+ PMD_INIT_LOG(ERR, "Bad queue: %u", queue_idx);
return 0;
}
if (unlikely((pkt->nb_segs > 1) &&
!(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
- PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set\n");
+ PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
rte_panic("Multisegment packet unsupported\n");
}
hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
- PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u\n",
+ PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
pci_dev->id.vendor_id, pci_dev->id.device_id,
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function);
return -ENODEV;
}
- PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x\n", tx_bar_off);
- PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x\n", rx_bar_off);
+ PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x", tx_bar_off);
+ PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x", rx_bar_off);
hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;
- PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p\n",
+ PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
nfp_net_cfg_queue_setup(hw);
else
hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
- PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d\n",
+ PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
hw->ver, hw->max_mtu);
- PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s\n", hw->cap,
+ PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s", hw->cap,
hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
hw->stride_rx = stride;
hw->stride_tx = stride;
- PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
+ PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
hw->max_rx_queues, hw->max_tx_queues);
/* Initializing spinlock for reconfigs */
{
unsigned int i;
- PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);
+ PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs", txq->nb_tx_desc);
if (txq->sw_tx_ring) {
for (i = 0; i < txq->nb_tx_desc; i++) {
*/
rte_wmb();
- PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u\n", bd_prod, cqe_prod);
+ PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
}
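The qede RX/TX log wrappers seen above take the queue as a second argument. A hedged sketch of that shape, assuming the real macro prefixes the port and queue ids (the field names here are illustrative, not the driver's exact definition):
/*
 * Sketch of a per-queue log wrapper in the qede style (assumed
 * layout; the driver's actual macro may format the prefix
 * differently). Like the other wrappers, it appends the newline.
 */
#define PMD_RX_LOG(level, q, fmt, args...) \
	RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \
		__func__, (q)->port_id, (q)->queue_id, ## args)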
static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
pkt_len;
if (unlikely(!cur_size)) {
PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
- " left for mapping jumbo\n", num_segs);
+ " left for mapping jumbo", num_segs);
qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
return -EINVAL;
}
cqe_type = cqe->fast_path_regular.type;
if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
- PMD_RX_LOG(DEBUG, rxq, "Got a slowath CQE\n");
+ PMD_RX_LOG(DEBUG, rxq, "Got a slowath CQE");
qdev->ops->eth_cqe_completion(edev, fp->id,
(struct eth_slow_path_rx_cqe *)cqe);
PMD_RX_LOG(DEBUG, rxq,
"CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
- " len = %u, parsing_flags = %d\n",
+ " len = %u, parsing_flags = %d",
cqe_type, fp_cqe->bitfields,
rte_le_to_cpu_16(fp_cqe->vlan_tag),
len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));
rx_mb->ol_flags = 0;
if (qede_tunn_exist(parse_flag)) {
- PMD_RX_LOG(DEBUG, rxq, "Rx tunneled packet\n");
+ PMD_RX_LOG(DEBUG, rxq, "Rx tunneled packet");
if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
PMD_RX_LOG(ERR, rxq,
- "L4 csum failed, flags = 0x%x\n",
+ "L4 csum failed, flags = 0x%x",
parse_flag);
rxq->rx_hw_errors++;
rx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
tunn_parse_flag);
}
} else {
- PMD_RX_LOG(DEBUG, rxq, "Rx non-tunneled packet\n");
+ PMD_RX_LOG(DEBUG, rxq, "Rx non-tunneled packet");
if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
PMD_RX_LOG(ERR, rxq,
- "L4 csum failed, flags = 0x%x\n",
+ "L4 csum failed, flags = 0x%x",
parse_flag);
rxq->rx_hw_errors++;
rx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
} else if (unlikely(qede_check_notunn_csum_l3(rx_mb,
parse_flag))) {
PMD_RX_LOG(ERR, rxq,
- "IP csum failed, flags = 0x%x\n",
+ "IP csum failed, flags = 0x%x",
parse_flag);
rxq->rx_hw_errors++;
rx_mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
}
}
- PMD_RX_LOG(INFO, rxq, "packet_type 0x%x\n", rx_mb->packet_type);
+ PMD_RX_LOG(INFO, rxq, "packet_type 0x%x", rx_mb->packet_type);
if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
PMD_RX_LOG(ERR, rxq,
"New buffer allocation failed,"
- "dropping incoming packet\n");
+ "dropping incoming packet");
qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
rte_eth_devices[rxq->port_id].
data->rx_mbuf_alloc_failed++;
qede_rx_bd_ring_consume(rxq);
if (fp_cqe->bd_num > 1) {
PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
- " len on first: %04x Total Len: %04x\n",
+ " len on first: %04x Total Len: %04x",
fp_cqe->bd_num, len, pkt_len);
num_segs = fp_cqe->bd_num - 1;
seg1 = rx_mb;
for (j = 0; j < num_segs; j++) {
if (qede_alloc_rx_buffer(rxq)) {
PMD_RX_LOG(ERR, rxq,
- "Buffer allocation failed\n");
+ "Buffer allocation failed");
rte_eth_devices[rxq->port_id].
data->rx_mbuf_alloc_failed++;
rxq->rx_alloc_errors++;
if (qdev->rss_enable && htype) {
rx_mb->ol_flags |= PKT_RX_RSS_HASH;
rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
- PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",
+ PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x",
rx_mb->hash.rss);
}
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
if (rx_pkt == nb_pkts) {
PMD_RX_LOG(DEBUG, rxq,
- "Budget reached nb_pkts=%u received=%u\n",
+ "Budget reached nb_pkts=%u received=%u",
rx_pkt, nb_pkts);
break;
}
rxq->rcv_pkts += rx_pkt;
- PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
+ PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
return rx_pkt;
}
struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;
if (unlikely(!mbuf)) {
- PMD_TX_LOG(ERR, txq, "null mbuf\n");
+ PMD_TX_LOG(ERR, txq, "null mbuf");
PMD_TX_LOG(ERR, txq,
- "tx_desc %u tx_avail %u tx_cons %u tx_prod %u\n",
+ "tx_desc %u tx_avail %u tx_cons %u tx_prod %u",
txq->nb_tx_desc, txq->nb_tx_avail, idx,
TX_PROD(txq));
return -1;
while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
if (qede_free_tx_pkt(edev, txq)) {
PMD_TX_LOG(ERR, txq,
- "hw_bd_cons = %u, chain_cons = %u\n",
+ "hw_bd_cons = %u, chain_cons = %u",
hw_bd_cons,
ecore_chain_get_cons_idx(&txq->tx_pbl));
break;
tx_compl++;
}
- PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
+ PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u",
tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
return tx_compl;
}
memset(bd2, 0, sizeof(*bd2));
mapping = rte_mbuf_data_dma_addr(m_seg);
QEDE_BD_SET_ADDR_LEN(bd2, mapping, m_seg->data_len);
- PMD_TX_LOG(DEBUG, txq, "BD2 len %04x\n",
+ PMD_TX_LOG(DEBUG, txq, "BD2 len %04x",
m_seg->data_len);
} else if (nb_segs == 2) {
bd3 = (struct eth_tx_3rd_bd *)
memset(bd3, 0, sizeof(*bd3));
mapping = rte_mbuf_data_dma_addr(m_seg);
QEDE_BD_SET_ADDR_LEN(bd3, mapping, m_seg->data_len);
- PMD_TX_LOG(DEBUG, txq, "BD3 len %04x\n",
+ PMD_TX_LOG(DEBUG, txq, "BD3 len %04x",
m_seg->data_len);
} else {
tx_bd = (struct eth_tx_bd *)
memset(tx_bd, 0, sizeof(*tx_bd));
mapping = rte_mbuf_data_dma_addr(m_seg);
QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
- PMD_TX_LOG(DEBUG, txq, "BD len %04x\n",
+ PMD_TX_LOG(DEBUG, txq, "BD len %04x",
m_seg->data_len);
}
nb_segs++;
fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id];
if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
- PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
+ PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
(void)qede_process_tx_compl(edev, txq);
}
nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail /
ETH_TX_MAX_BDS_PER_NON_LSO_PACKET));
if (unlikely(nb_tx_pkts == 0)) {
- PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
+ PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u",
nb_pkts, txq->nb_tx_avail);
return 0;
}
/* Map MBUF linear data for DMA and set in the first BD */
QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
mbuf->data_len);
- PMD_TX_LOG(INFO, txq, "BD1 len %04x\n", mbuf->data_len);
+ PMD_TX_LOG(INFO, txq, "BD1 len %04x", mbuf->data_len);
if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type)) {
- PMD_TX_LOG(INFO, txq, "Tx tunnel packet\n");
+ PMD_TX_LOG(INFO, txq, "Tx tunnel packet");
/* First indicate its a tunnel pkt */
bd1->data.bd_flags.bitfields |=
ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
/* Outer IP checksum offload */
if (mbuf->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
- PMD_TX_LOG(INFO, txq, "OuterIP csum offload\n");
+ PMD_TX_LOG(INFO, txq, "OuterIP csum offload");
bd1->data.bd_flags.bitfields |=
ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
/* Descriptor based VLAN insertion */
if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
- PMD_TX_LOG(INFO, txq, "Insert VLAN 0x%x\n",
+ PMD_TX_LOG(INFO, txq, "Insert VLAN 0x%x",
mbuf->vlan_tci);
bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
bd1->data.bd_flags.bitfields |=
/* Offload the IP checksum in the hardware */
if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
- PMD_TX_LOG(INFO, txq, "IP csum offload\n");
+ PMD_TX_LOG(INFO, txq, "IP csum offload");
bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
}
/* L4 checksum offload (tcp or udp) */
if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- PMD_TX_LOG(INFO, txq, "L4 csum offload\n");
+ PMD_TX_LOG(INFO, txq, "L4 csum offload");
bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
/* IPv6 + extn. -> later */
rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
nb_pkt_sent++;
txq->xmit_pkts++;
- PMD_TX_LOG(INFO, txq, "nbds = %d pkt_len = %04x\n",
+ PMD_TX_LOG(INFO, txq, "nbds = %d pkt_len = %04x",
bd1->data.nbds, mbuf->pkt_len);
}
/* Check again for Tx completions */
(void)qede_process_tx_compl(edev, txq);
- PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
+ PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d",
nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
return nb_pkt_sent;
evq->exception = B_TRUE;
sfc_err(evq->sa,
"EVQ %u RxQ %u invalid RX abort "
- "(id=%#x size=%u flags=%#x); needs restart\n",
+ "(id=%#x size=%u flags=%#x); needs restart",
evq->evq_index, sfc_rxq_sw_index(rxq),
id, size, flags);
goto done;
sfc_err(evq->sa,
"EVQ %u RxQ %u completion out of order "
- "(id=%#x delta=%u flags=%#x); needs restart\n",
+ "(id=%#x delta=%u flags=%#x); needs restart",
evq->evq_index, sfc_rxq_sw_index(rxq), id, delta,
flags);
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
uint32_t frame_size = mtu + ether_hdr_len;
if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
- PMD_INIT_LOG(ERR, "MTU should be between %d and %d\n",
+ PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
ETHER_MIN_MTU, VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
return -EINVAL;
}
uint32_t i;
struct virtio_hw *hw = dev->data->dev_private;
- PMD_INIT_LOG(INFO, "queue/interrupt binding\n");
+ PMD_INIT_LOG(INFO, "queue/interrupt binding");
for (i = 0; i < dev->data->nb_rx_queues; ++i) {
dev->intr_handle->intr_vec[i] = i + 1;
if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
uint32_t i;
struct virtio_hw *hw = dev->data->dev_private;
- PMD_INIT_LOG(INFO, "queue/interrupt unbinding\n");
+ PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
for (i = 0; i < dev->data->nb_rx_queues; ++i)
VTPCI_OPS(hw)->set_queue_irq(hw,
hw->vqs[i * VTNET_CQ],
cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
if (unlikely(cookie == NULL)) {
- PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
vq->vq_used_cons_idx);
break;
}
*/
callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
if (callfd < 0) {
- PMD_DRV_LOG(ERR, "callfd error, %s\n", strerror(errno));
+ PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
return -1;
}
file.index = queue_sel;
*/
kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
if (kickfd < 0) {
- PMD_DRV_LOG(ERR, "kickfd error, %s\n", strerror(errno));
+ PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
return -1;
}
file.index = queue_sel;
for (i = 0; i < ETHER_ADDR_LEN; ++i)
dev->mac_addr[i] = ((const uint8_t *)src)[i];
else
- PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d\n",
+ PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
offset, length);
}
}
if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
- PMD_DRV_LOG(ERR, "failed to kick backend: %s\n",
+ PMD_DRV_LOG(ERR, "failed to kick backend: %s",
strerror(errno));
}
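The virtio-user hunk above logs a failed backend kick; the kick itself is just an 8-byte write to an eventfd. A self-contained sketch under that assumption (kick_backend is a hypothetical helper, not a function from the driver):
#include <stdint.h>
#include <unistd.h>
/*
 * Hypothetical helper: wake the vhost backend by incrementing the
 * eventfd counter. Any non-zero 8-byte value works; the peer polling
 * the fd is woken and reads the counter back down to zero.
 */
static int
kick_backend(int kickfd)
{
	uint64_t buf = 1;

	if (write(kickfd, &buf, sizeof(buf)) < 0)
		return -1;
	return 0;
}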
goto end;
}
} else {
- PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user\n",
+ PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
VIRTIO_USER_ARG_QUEUE_SIZE);
goto end;
}
if (!name)
return -EINVAL;
- PMD_DRV_LOG(INFO, "Un-Initializing %s\n", name);
+ PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
eth_dev = rte_eth_dev_allocated(name);
if (!eth_dev)
return -ENODEV;