drivers/net: remove redundant new line from logs
author Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 27 Jan 2017 15:16:32 +0000 (15:16 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Mon, 30 Jan 2017 21:18:27 +0000 (22:18 +0100)
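The "\n" being removed is redundant because the PMD logging macros these drivers use already append a newline to the format string, so an explicit one in the message yields a blank line in the log output. A minimal sketch of the pattern, modeled on the i40e logging header of this era (the exact macro names and definitions vary slightly per driver):

    /* Sketch of the common DPDK PMD log macro pair (modeled on
     * i40e_logs.h; not verbatim for every driver): the wrapper
     * appends the trailing "\n", which is what makes a newline in
     * the caller's format string redundant. */
    #define PMD_DRV_LOG_RAW(level, fmt, args...) \
            RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)

    #define PMD_DRV_LOG(level, fmt, args...) \
            PMD_DRV_LOG_RAW(level, fmt "\n", ## args)

With the terminator supplied by the macro, callers pass the bare message; the hunks below drop the duplicated "\n" accordingly.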
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
17 files changed:
drivers/net/bnx2x/bnx2x.c
drivers/net/e1000/em_ethdev.c
drivers/net/e1000/igb_ethdev.c
drivers/net/ena/ena_ethdev.c
drivers/net/i40e/i40e_ethdev.c
drivers/net/i40e/i40e_ethdev_vf.c
drivers/net/i40e/i40e_fdir.c
drivers/net/i40e/i40e_pf.c
drivers/net/i40e/i40e_rxtx.c
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/nfp/nfp_net.c
drivers/net/qede/qede_rxtx.c
drivers/net/sfc/sfc_ev.c
drivers/net/virtio/virtio_ethdev.c
drivers/net/virtio/virtio_rxtx.c
drivers/net/virtio/virtio_user/virtio_user_dev.c
drivers/net/virtio/virtio_user_ethdev.c

diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 0d16a73..cc380bd 100644
@@ -2220,7 +2220,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
        }
 
        PMD_TX_LOG(DEBUG,
-                  "start bd: nbytes %d flags %x vlan %x\n",
+                  "start bd: nbytes %d flags %x vlan %x",
                   tx_start_bd->nbytes,
                   tx_start_bd->bd_flags.as_bitfield,
                   tx_start_bd->vlan_or_ethertype);
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index d67fdef..d778785 100644
@@ -639,7 +639,7 @@ eth_em_start(struct rte_eth_dev *dev)
                                        dev->data->nb_rx_queues * sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-                                               " intr_vec\n", dev->data->nb_rx_queues);
+                                               " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
 
@@ -736,7 +736,7 @@ eth_em_start(struct rte_eth_dev *dev)
                                                (void *)dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO, "lsc won't enable because of"
-                                    " no intr multiplex\n");
+                                    " no intr multiplex");
        }
        /* check if rxq interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.rxq != 0)
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 8843dd1..d939774 100644
@@ -1292,7 +1292,7 @@ eth_igb_start(struct rte_eth_dev *dev)
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-                                    " intr_vec\n", dev->data->nb_rx_queues);
+                                    " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }
@@ -1399,7 +1399,7 @@ eth_igb_start(struct rte_eth_dev *dev)
                                             (void *)dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO, "lsc won't enable because of"
-                                    " no intr multiplex\n");
+                                    " no intr multiplex");
        }
 
        /* check if rxq interrupt is enabled */
@@ -3159,7 +3159,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (!intr_handle->intr_vec) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-                                    " intr_vec\n", dev->data->nb_rx_queues);
+                                    " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }
@@ -3378,7 +3378,7 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
        if (reta_size != ETH_RSS_RETA_SIZE_128) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                       "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
                return -EINVAL;
        }
 
@@ -3419,7 +3419,7 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
        if (reta_size != ETH_RSS_RETA_SIZE_128) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                       "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
                return -EINVAL;
        }
 
@@ -3540,7 +3540,7 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
                                (struct rte_eth_syn_filter *)arg);
                break;
        default:
-               PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+               PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
                ret = -EINVAL;
                break;
        }
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index a580345..ff36c64 100644
@@ -746,7 +746,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
 
                        if (rc) {
                                PMD_INIT_LOG(ERR,
-                                            "failed to restart queue %d type(%d)\n",
+                                            "failed to restart queue %d type(%d)",
                                             i, ring_type);
                                return -1;
                        }
@@ -772,7 +772,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
        uint32_t max_frame_len = ena_get_mtu_conf(adapter);
 
        if (max_frame_len > adapter->max_mtu) {
-               PMD_INIT_LOG(ERR, "Unsupported MTU of %d\n", max_frame_len);
+               PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
                return -1;
        }
 
@@ -799,7 +799,7 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev,
                queue_size = rte_align32pow2(queue_size >> 1);
 
        if (queue_size == 0) {
-               PMD_INIT_LOG(ERR, "Invalid queue size\n");
+               PMD_INIT_LOG(ERR, "Invalid queue size");
                return -EFAULT;
        }
 
@@ -937,7 +937,7 @@ static int ena_queue_restart(struct ena_ring *ring)
 
        rc = ena_populate_rx_queue(ring, ring->ring_size);
        if ((unsigned int)rc != ring->ring_size) {
-               PMD_INIT_LOG(ERR, "Failed to populate rx ring !\n");
+               PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
                return (-1);
        }
 
@@ -1293,7 +1293,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
        pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
        adapter->pdev = pci_dev;
 
-       PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
+       PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
                     pci_dev->addr.domain,
                     pci_dev->addr.bus,
                     pci_dev->addr.devid,
@@ -1310,7 +1310,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
        else if (adapter->regs)
                ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
        else
-               PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
+               PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
                             ENA_REGS_BAR);
 
        ena_dev->reg_bar = adapter->regs;
@@ -1324,7 +1324,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
        /* device specific initialization routine */
        rc = ena_device_init(ena_dev, &get_feat_ctx);
        if (rc) {
-               PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
+               PMD_INIT_LOG(CRIT, "Failed to init ENA device");
                return -1;
        }
 
@@ -1332,7 +1332,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
                if (get_feat_ctx.max_queues.max_llq_num == 0) {
                        PMD_INIT_LOG(ERR,
                                     "Trying to use LLQ but llq_num is 0.\n"
-                                    "Fall back into regular queues.\n");
+                                    "Fall back into regular queues.");
                        ena_dev->tx_mem_queue_type =
                                ENA_ADMIN_PLACEMENT_POLICY_HOST;
                        adapter->num_queues =
@@ -1390,7 +1390,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
        if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
              adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
-               PMD_INIT_LOG(ERR, "Illegal adapter state: %d\n",
+               PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
                             adapter->state);
                return -1;
        }
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 0937dc4..4492bcc 100644
@@ -1894,7 +1894,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
                                    0);
                if (!intr_handle->intr_vec) {
                        PMD_INIT_LOG(ERR,
-                               "Failed to allocate %d rx_queues intr_vec\n",
+                               "Failed to allocate %d rx_queues intr_vec",
                                dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
@@ -1969,7 +1969,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
 
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO,
-                               "lsc won't enable because of no intr multiplex\n");
+                               "lsc won't enable because of no intr multiplex");
        } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
                ret = i40e_aq_set_phy_int_mask(hw,
                                               ~(I40E_AQ_EVENT_LINK_UPDOWN |
@@ -2936,7 +2936,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
                else {
                        ret = -EINVAL;
                        PMD_DRV_LOG(ERR,
-                               "Unsupported vlan type in single vlan.\n");
+                               "Unsupported vlan type in single vlan.");
                        return ret;
                }
                break;
@@ -3498,7 +3498,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
        if (reta_size != lut_size ||
                reta_size > ETH_RSS_RETA_SIZE_512) {
                PMD_DRV_LOG(ERR,
-                       "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+                       "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
                        reta_size, lut_size);
                return -EINVAL;
        }
@@ -3539,7 +3539,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
        if (reta_size != lut_size ||
                reta_size > ETH_RSS_RETA_SIZE_512) {
                PMD_DRV_LOG(ERR,
-                       "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+                       "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
                        reta_size, lut_size);
                return -EINVAL;
        }
@@ -4586,7 +4586,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf)
 
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret)
-               PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n",
+               PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
                            hw->aq.asq_last_status);
 }
 
@@ -5183,11 +5183,11 @@ i40e_pf_setup(struct i40e_pf *pf)
        else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
                settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
        else {
-               PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
-                                               hw->func_caps.rss_table_size);
+               PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
+                       hw->func_caps.rss_table_size);
                return I40E_ERR_PARAM;
        }
-       PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u\n",
+       PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
                hw->func_caps.rss_table_size);
        pf->hash_lut_size = hw->func_caps.rss_table_size;
 
@@ -5870,7 +5870,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi,
                                flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
                                break;
                        default:
-                               PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
+                               PMD_DRV_LOG(ERR, "Invalid MAC match type");
                                ret = I40E_ERR_PARAM;
                                goto DONE;
                        }
@@ -5945,7 +5945,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
                                flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
                                break;
                        default:
-                               PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
+                               PMD_DRV_LOG(ERR, "Invalid MAC filter type");
                                ret = I40E_ERR_PARAM;
                                goto DONE;
                        }
@@ -6365,7 +6365,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
        if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
                filter_type == RTE_MACVLAN_HASH_MATCH) {
                if (vlan_num == 0) {
-                       PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
+                       PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
                        return I40E_ERR_PARAM;
                }
        } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
@@ -7162,7 +7162,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
        int ret = -EINVAL;
 
        val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
-       PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
+       PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
 
        if (len == 3) {
                reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
@@ -7181,7 +7181,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
        } else {
                ret = 0;
        }
-       PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n",
+       PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
                    I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
 
        return ret;
@@ -8025,10 +8025,10 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
 {
        uint32_t reg = i40e_read_rx_ctl(hw, addr);
 
-       PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg);
+       PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
        if (reg != val)
                i40e_write_rx_ctl(hw, addr, val);
-       PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr,
+       PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
                    (uint32_t)i40e_read_rx_ctl(hw, addr));
 }
 
@@ -8476,7 +8476,7 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
                        filter->queue, add, &stats, NULL);
 
        PMD_DRV_LOG(INFO,
-               "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u\n",
+               "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
                ret, stats.mac_etype_used, stats.etype_used,
                stats.mac_etype_free, stats.etype_free);
        if (ret < 0)
@@ -8528,7 +8528,7 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                        FALSE);
                break;
        default:
-               PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+               PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
                ret = -ENOSYS;
                break;
        }
@@ -9766,8 +9766,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
        old_cfg->etsrec = old_cfg->etscfg;
        ret = i40e_set_dcb_config(hw);
        if (ret) {
-               PMD_INIT_LOG(ERR,
-                        "Set DCB Config failed, err %s aq_err %s\n",
+               PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
                         i40e_stat_str(hw, ret),
                         i40e_aq_str(hw, hw->aq.asq_last_status));
                return ret;
@@ -9799,7 +9798,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
                ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
                if (ret)
                        PMD_INIT_LOG(WARNING,
-                                "Failed configuring TC for VEB seid=%d\n",
+                                "Failed configuring TC for VEB seid=%d",
                                 main_vsi->veb->seid);
        }
        /* Update each VSI */
@@ -9817,7 +9816,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
                                                         I40E_DEFAULT_TCMAP);
                        if (ret)
                                PMD_INIT_LOG(WARNING,
-                                       "Failed configuring TC for VSI seid=%d\n",
+                                       "Failed configuring TC for VSI seid=%d",
                                        vsi_list->vsi->seid);
                        /* continue */
                }
@@ -10180,8 +10179,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
        /* mtu setting is forbidden if port is start */
        if (dev_data->dev_started) {
-               PMD_DRV_LOG(ERR,
-                           "port %d must be stopped before configuration\n",
+               PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
                            dev_data->port_id);
                return -EBUSY;
        }
@@ -10224,7 +10222,7 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf)
        }
        PMD_DRV_LOG(INFO, "Ethertype filter:"
                    " mac_etype_used = %u, etype_used = %u,"
-                   " mac_etype_free = %u, etype_free = %u\n",
+                   " mac_etype_free = %u, etype_free = %u",
                    stats.mac_etype_used, stats.etype_used,
                    stats.mac_etype_free, stats.etype_free);
 }
@@ -10475,8 +10473,7 @@ i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
                if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
                    filter_type == RTE_MACVLAN_HASH_MATCH) {
                        if (vlan_num == 0) {
-                               PMD_DRV_LOG(ERR,
-                                           "VLAN number shouldn't be 0\n");
+                               PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
                                return I40E_ERR_PARAM;
                        }
                } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 17a035c..a606aef 100644
@@ -645,7 +645,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
        ret = i40evf_execute_vf_cmd(dev, &args);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to execute command of "
-                       "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
+                       "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES");
 
        return ret;
 }
@@ -698,7 +698,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
        ret = i40evf_execute_vf_cmd(dev, &args);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to execute command of "
-                       "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");
+                       "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
 
        return ret;
 }
@@ -1325,16 +1325,16 @@ i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
 
        switch (pf_msg->event) {
        case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
-               PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n");
+               PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
                break;
        case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
-               PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n");
+               PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
                vf->link_up = pf_msg->event_data.link_event.link_status;
                vf->link_speed = pf_msg->event_data.link_event.link_speed;
                break;
        case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
-               PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event\n");
+               PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
                break;
        default:
                PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
@@ -1396,7 +1396,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
                                                "expect %u, get %u",
                                                vf->pend_cmd, msg_opc);
                                PMD_DRV_LOG(DEBUG, "adminq response is received,"
-                                            " opcode = %d\n", msg_opc);
+                                            " opcode = %d", msg_opc);
                        }
                        break;
                default:
@@ -1434,19 +1434,19 @@ i40evf_dev_interrupt_handler(struct rte_intr_handle *intr_handle,
 
        /* No interrupt event indicated */
        if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
-               PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do\n");
+               PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
                goto done;
        }
 
        if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
-               PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported\n");
+               PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
                i40evf_handle_aq_msg(dev);
        }
 
        /* Link Status Change interrupt */
        if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
                PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
-                                  " do nothing\n");
+                                  " do nothing");
 
 done:
        i40evf_enable_irq0(hw);
@@ -2075,7 +2075,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (!intr_handle->intr_vec) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-                                    " intr_vec\n", dev->data->nb_rx_queues);
+                                    " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }
@@ -2403,7 +2403,7 @@ i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
        if (reta_size != ETH_RSS_RETA_SIZE_64) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number of hardware can "
-                       "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+                       "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
                return -EINVAL;
        }
 
@@ -2442,7 +2442,7 @@ i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
        if (reta_size != ETH_RSS_RETA_SIZE_64) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number of hardware can "
-                       "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+                       "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
                return -EINVAL;
        }
 
@@ -2587,7 +2587,7 @@ i40evf_config_rss(struct i40e_vf *vf)
 
        if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
                i40evf_disable_rss(vf);
-               PMD_DRV_LOG(DEBUG, "RSS not configured\n");
+               PMD_DRV_LOG(DEBUG, "RSS not configured");
                return 0;
        }
 
@@ -2604,7 +2604,7 @@ i40evf_config_rss(struct i40e_vf *vf)
        rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
        if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
                i40evf_disable_rss(vf);
-               PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
+               PMD_DRV_LOG(DEBUG, "No hash flag is set");
                return 0;
        }
 
@@ -2680,8 +2680,7 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
        /* mtu setting is forbidden if port is start */
        if (dev_data->dev_started) {
-               PMD_DRV_LOG(ERR,
-                           "port %d must be stopped before configuration\n",
+               PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
                            dev_data->port_id);
                return -EBUSY;
        }
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 67d63ff..0700253 100644
@@ -1612,6 +1612,6 @@ i40e_fdir_filter_restore(struct i40e_pf *pf)
                           I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
 #endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
 
-       PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d,  Best count: %d\n",
+       PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d,  Best count: %d",
                    guarant_cnt, best_cnt);
 }
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 0af0573..f771dfb 100644
@@ -471,7 +471,7 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
                vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
                msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
                                        vc_vqci->num_queue_pairs)) {
-               PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong\n");
+               PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }
@@ -539,7 +539,7 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
                vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
                msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
                                        vc_vqcei->num_queue_pairs)) {
-               PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong\n");
+               PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
                ret = I40E_ERR_PARAM;
                goto send_msg;
        }
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 9bfc7b8..608685f 100644
@@ -269,7 +269,7 @@ i40e_parse_tunneling_params(uint64_t ol_flags,
                *cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
                break;
        default:
-               PMD_TX_LOG(ERR, "Tunnel type not supported\n");
+               PMD_TX_LOG(ERR, "Tunnel type not supported");
                return;
        }
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index edbf75b..5b625a3 100644
@@ -2459,7 +2459,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-                                    " intr_vec\n", dev->data->nb_rx_queues);
+                                    " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }
@@ -2579,7 +2579,7 @@ skip_link_setup:
                                             ixgbe_dev_interrupt_handler, dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO, "lsc won't enable because of"
-                                    " no intr multiplex\n");
+                                    " no intr multiplex");
        }
 
        /* check if rxq interrupt is enabled */
@@ -4282,7 +4282,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
        if (reta_size != sp_reta_size) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                       "(%d)\n", reta_size, sp_reta_size);
+                       "(%d)", reta_size, sp_reta_size);
                return -EINVAL;
        }
 
@@ -4329,7 +4329,7 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
        if (reta_size != sp_reta_size) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                       "(%d)\n", reta_size, sp_reta_size);
+                       "(%d)", reta_size, sp_reta_size);
                return -EINVAL;
        }
 
@@ -4589,7 +4589,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-                                    " intr_vec\n", dev->data->nb_rx_queues);
+                                    " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }
@@ -6056,7 +6056,7 @@ ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
                                (struct rte_eth_syn_filter *)arg);
                break;
        default:
-               PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+               PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
                ret = -EINVAL;
                break;
        }
@@ -8199,7 +8199,7 @@ int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
        /* For informational purposes only */
        if (i >= IXGBE_MAX_SECTX_POLL)
                PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
-                        "path fully disabled.  Continuing with init.\n");
+                        "path fully disabled.  Continuing with init.");
 
        return IXGBE_SUCCESS;
 }
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 1020d9e..173bebf 100644
@@ -374,12 +374,12 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
                if (new == 0)
                        break;
                if (new & NFP_NET_CFG_UPDATE_ERR) {
-                       PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x\n", new);
+                       PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
                        return -1;
                }
                if (cnt >= NFP_NET_POLL_TIMEOUT) {
                        PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
-                                         " %dms\n", update, cnt);
+                                         " %dms", update, cnt);
                        rte_panic("Exiting\n");
                }
                nanosleep(&wait, 0); /* waiting for a 1ms */
@@ -423,7 +423,7 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
         * Reconfig errors imply situations where they can be handled.
         * Otherwise, rte_panic is called inside __nfp_net_reconfig
         */
-       PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x\n",
+       PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
                     ctrl, update);
        return -EIO;
 }
@@ -453,7 +453,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
         * called after that internal process
         */
 
-       PMD_INIT_LOG(DEBUG, "Configure\n");
+       PMD_INIT_LOG(DEBUG, "Configure");
 
        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;
@@ -461,7 +461,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
 
        /* Checking TX mode */
        if (txmode->mq_mode) {
-               PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported\n");
+               PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
                return -EINVAL;
        }
 
@@ -471,13 +471,13 @@ nfp_net_configure(struct rte_eth_dev *dev)
                        update = NFP_NET_CFG_UPDATE_RSS;
                        new_ctrl = NFP_NET_CFG_CTRL_RSS;
                } else {
-                       PMD_INIT_LOG(INFO, "RSS not supported\n");
+                       PMD_INIT_LOG(INFO, "RSS not supported");
                        return -EINVAL;
                }
        }
 
        if (rxmode->split_hdr_size) {
-               PMD_INIT_LOG(INFO, "rxmode does not support split header\n");
+               PMD_INIT_LOG(INFO, "rxmode does not support split header");
                return -EINVAL;
        }
 
@@ -485,13 +485,13 @@ nfp_net_configure(struct rte_eth_dev *dev)
                if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
                        new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
                } else {
-                       PMD_INIT_LOG(INFO, "RXCSUM not supported\n");
+                       PMD_INIT_LOG(INFO, "RXCSUM not supported");
                        return -EINVAL;
                }
        }
 
        if (rxmode->hw_vlan_filter) {
-               PMD_INIT_LOG(INFO, "VLAN filter not supported\n");
+               PMD_INIT_LOG(INFO, "VLAN filter not supported");
                return -EINVAL;
        }
 
@@ -499,13 +499,13 @@ nfp_net_configure(struct rte_eth_dev *dev)
                if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
                        new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
                } else {
-                       PMD_INIT_LOG(INFO, "hw vlan strip not supported\n");
+                       PMD_INIT_LOG(INFO, "hw vlan strip not supported");
                        return -EINVAL;
                }
        }
 
        if (rxmode->hw_vlan_extend) {
-               PMD_INIT_LOG(INFO, "VLAN extended not supported\n");
+               PMD_INIT_LOG(INFO, "VLAN extended not supported");
                return -EINVAL;
        }
 
@@ -517,12 +517,12 @@ nfp_net_configure(struct rte_eth_dev *dev)
                /* this is handled in rte_eth_dev_configure */
 
        if (rxmode->hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "strip CRC not supported\n");
+               PMD_INIT_LOG(INFO, "strip CRC not supported");
                return -EINVAL;
        }
 
        if (rxmode->enable_scatter) {
-               PMD_INIT_LOG(INFO, "Scatter not supported\n");
+               PMD_INIT_LOG(INFO, "Scatter not supported");
                return -EINVAL;
        }
 
@@ -638,7 +638,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (!intr_handle->intr_vec) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-                                    " intr_vec\n", dev->data->nb_rx_queues);
+                                    " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }
@@ -646,11 +646,11 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
-               PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO\n");
+               PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
                /* UIO just supports one queue and no LSC*/
                nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
        } else {
-               PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO\n");
+               PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        /*
                         * The first msix vector is reserved for non
@@ -676,7 +676,7 @@ nfp_net_start(struct rte_eth_dev *dev)
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       PMD_INIT_LOG(DEBUG, "Start\n");
+       PMD_INIT_LOG(DEBUG, "Start");
 
        /* Disabling queues just in case... */
        nfp_net_disable_queues(dev);
@@ -765,7 +765,7 @@ nfp_net_stop(struct rte_eth_dev *dev)
 {
        int i;
 
-       PMD_INIT_LOG(DEBUG, "Stop\n");
+       PMD_INIT_LOG(DEBUG, "Stop");
 
        nfp_net_disable_queues(dev);
 
@@ -788,7 +788,7 @@ nfp_net_close(struct rte_eth_dev *dev)
        struct nfp_net_hw *hw;
        struct rte_pci_device *pci_dev;
 
-       PMD_INIT_LOG(DEBUG, "Close\n");
+       PMD_INIT_LOG(DEBUG, "Close");
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_DEV_TO_PCI(dev->device);
@@ -825,7 +825,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
-               PMD_INIT_LOG(INFO, "Promiscuous mode not supported\n");
+               PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
                return;
        }
 
@@ -1185,7 +1185,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
        rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
 
        if (rxq == NULL) {
-               PMD_INIT_LOG(ERR, "Bad queue: %u\n", queue_idx);
+               PMD_INIT_LOG(ERR, "Bad queue: %u", queue_idx);
                return 0;
        }
 
@@ -2084,7 +2084,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                if (unlikely((pkt->nb_segs > 1) &&
                             !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
-                       PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set\n");
+                       PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
                        rte_panic("Multisegment packet unsupported\n");
                }
 
@@ -2472,7 +2472,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
        hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
 
-       PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u\n",
+       PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
                     pci_dev->id.vendor_id, pci_dev->id.device_id,
                     pci_dev->addr.domain, pci_dev->addr.bus,
                     pci_dev->addr.devid, pci_dev->addr.function);
@@ -2499,13 +2499,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                return -ENODEV;
        }
 
-       PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x\n", tx_bar_off);
-       PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x\n", rx_bar_off);
+       PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x", tx_bar_off);
+       PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x", rx_bar_off);
 
        hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
        hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;
 
-       PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p\n",
+       PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
                     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
 
        nfp_net_cfg_queue_setup(hw);
@@ -2521,9 +2521,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        else
                hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
 
-       PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d\n",
+       PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
                     hw->ver, hw->max_mtu);
-       PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s\n", hw->cap,
+       PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s", hw->cap,
                     hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
                     hw->cap & NFP_NET_CFG_CTRL_RXCSUM  ? "RXCSUM "  : "",
                     hw->cap & NFP_NET_CFG_CTRL_TXCSUM  ? "TXCSUM "  : "",
@@ -2539,7 +2539,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        hw->stride_rx = stride;
        hw->stride_tx = stride;
 
-       PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
+       PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
                     hw->max_rx_queues, hw->max_tx_queues);
 
        /* Initializing spinlock for reconfigs */
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 821ffbc..01ea9b4 100644
@@ -69,7 +69,7 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
 {
        unsigned int i;
 
-       PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);
+       PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs", txq->nb_tx_desc);
 
        if (txq->sw_tx_ring) {
                for (i = 0; i < txq->nb_tx_desc; i++) {
@@ -506,7 +506,7 @@ qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
         */
        rte_wmb();
 
-       PMD_RX_LOG(DEBUG, rxq, "bd_prod %u  cqe_prod %u\n", bd_prod, cqe_prod);
+       PMD_RX_LOG(DEBUG, rxq, "bd_prod %u  cqe_prod %u", bd_prod, cqe_prod);
 }
 
 static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
@@ -827,7 +827,7 @@ qede_process_sg_pkts(void *p_rxq,  struct rte_mbuf *rx_mb,
                                                        pkt_len;
                if (unlikely(!cur_size)) {
                        PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
-                                  " left for mapping jumbo\n", num_segs);
+                                  " left for mapping jumbo", num_segs);
                        qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
                        return -EINVAL;
                }
@@ -885,7 +885,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                cqe_type = cqe->fast_path_regular.type;
 
                if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
-                       PMD_RX_LOG(DEBUG, rxq, "Got a slowath CQE\n");
+                       PMD_RX_LOG(DEBUG, rxq, "Got a slowath CQE");
 
                        qdev->ops->eth_cqe_completion(edev, fp->id,
                                (struct eth_slow_path_rx_cqe *)cqe);
@@ -907,7 +907,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                PMD_RX_LOG(DEBUG, rxq,
                           "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
-                          " len = %u, parsing_flags = %d\n",
+                          " len = %u, parsing_flags = %d",
                           cqe_type, fp_cqe->bitfields,
                           rte_le_to_cpu_16(fp_cqe->vlan_tag),
                           len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));
@@ -919,10 +919,10 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                rx_mb->ol_flags = 0;
 
                if (qede_tunn_exist(parse_flag)) {
-                       PMD_RX_LOG(DEBUG, rxq, "Rx tunneled packet\n");
+                       PMD_RX_LOG(DEBUG, rxq, "Rx tunneled packet");
                        if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
                                PMD_RX_LOG(ERR, rxq,
-                                           "L4 csum failed, flags = 0x%x\n",
+                                           "L4 csum failed, flags = 0x%x",
                                            parse_flag);
                                rxq->rx_hw_errors++;
                                rx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
@@ -934,17 +934,17 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                                                        tunn_parse_flag);
                        }
                } else {
-                       PMD_RX_LOG(DEBUG, rxq, "Rx non-tunneled packet\n");
+                       PMD_RX_LOG(DEBUG, rxq, "Rx non-tunneled packet");
                        if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
                                PMD_RX_LOG(ERR, rxq,
-                                           "L4 csum failed, flags = 0x%x\n",
+                                           "L4 csum failed, flags = 0x%x",
                                            parse_flag);
                                rxq->rx_hw_errors++;
                                rx_mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                        } else if (unlikely(qede_check_notunn_csum_l3(rx_mb,
                                                        parse_flag))) {
                                PMD_RX_LOG(ERR, rxq,
-                                          "IP csum failed, flags = 0x%x\n",
+                                          "IP csum failed, flags = 0x%x",
                                           parse_flag);
                                rxq->rx_hw_errors++;
                                rx_mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
@@ -954,12 +954,12 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        }
                }
 
-               PMD_RX_LOG(INFO, rxq, "packet_type 0x%x\n", rx_mb->packet_type);
+               PMD_RX_LOG(INFO, rxq, "packet_type 0x%x", rx_mb->packet_type);
 
                if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
                        PMD_RX_LOG(ERR, rxq,
                                   "New buffer allocation failed,"
-                                  "dropping incoming packet\n");
+                                  "dropping incoming packet");
                        qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
                        rte_eth_devices[rxq->port_id].
                            data->rx_mbuf_alloc_failed++;
@@ -969,7 +969,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                qede_rx_bd_ring_consume(rxq);
                if (fp_cqe->bd_num > 1) {
                        PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
-                                  " len on first: %04x Total Len: %04x\n",
+                                  " len on first: %04x Total Len: %04x",
                                   fp_cqe->bd_num, len, pkt_len);
                        num_segs = fp_cqe->bd_num - 1;
                        seg1 = rx_mb;
@@ -979,7 +979,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        for (j = 0; j < num_segs; j++) {
                                if (qede_alloc_rx_buffer(rxq)) {
                                        PMD_RX_LOG(ERR, rxq,
-                                               "Buffer allocation failed\n");
+                                               "Buffer allocation failed");
                                        rte_eth_devices[rxq->port_id].
                                                data->rx_mbuf_alloc_failed++;
                                        rxq->rx_alloc_errors++;
@@ -1006,7 +1006,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                if (qdev->rss_enable && htype) {
                        rx_mb->ol_flags |= PKT_RX_RSS_HASH;
                        rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
-                       PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",
+                       PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x",
                                   rx_mb->hash.rss);
                }
 
@@ -1034,7 +1034,7 @@ next_cqe:
                sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
                if (rx_pkt == nb_pkts) {
                        PMD_RX_LOG(DEBUG, rxq,
-                                  "Budget reached nb_pkts=%u received=%u\n",
+                                  "Budget reached nb_pkts=%u received=%u",
                                   rx_pkt, nb_pkts);
                        break;
                }
@@ -1044,7 +1044,7 @@ next_cqe:
 
        rxq->rcv_pkts += rx_pkt;
 
-       PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
+       PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
 
        return rx_pkt;
 }
@@ -1057,9 +1057,9 @@ qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
        struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;
 
        if (unlikely(!mbuf)) {
-               PMD_TX_LOG(ERR, txq, "null mbuf\n");
+               PMD_TX_LOG(ERR, txq, "null mbuf");
                PMD_TX_LOG(ERR, txq,
-                          "tx_desc %u tx_avail %u tx_cons %u tx_prod %u\n",
+                          "tx_desc %u tx_avail %u tx_cons %u tx_prod %u",
                           txq->nb_tx_desc, txq->nb_tx_avail, idx,
                           TX_PROD(txq));
                return -1;
@@ -1090,7 +1090,7 @@ qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
        while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
                if (qede_free_tx_pkt(edev, txq)) {
                        PMD_TX_LOG(ERR, txq,
-                                  "hw_bd_cons = %u, chain_cons = %u\n",
+                                  "hw_bd_cons = %u, chain_cons = %u",
                                   hw_bd_cons,
                                   ecore_chain_get_cons_idx(&txq->tx_pbl));
                        break;
@@ -1099,7 +1099,7 @@ qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
                tx_compl++;
        }
 
-       PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
+       PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u",
                   tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
        return tx_compl;
 }
@@ -1124,7 +1124,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
                        memset(bd2, 0, sizeof(*bd2));
                        mapping = rte_mbuf_data_dma_addr(m_seg);
                        QEDE_BD_SET_ADDR_LEN(bd2, mapping, m_seg->data_len);
-                       PMD_TX_LOG(DEBUG, txq, "BD2 len %04x\n",
+                       PMD_TX_LOG(DEBUG, txq, "BD2 len %04x",
                                   m_seg->data_len);
                } else if (nb_segs == 2) {
                        bd3 = (struct eth_tx_3rd_bd *)
@@ -1132,7 +1132,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
                        memset(bd3, 0, sizeof(*bd3));
                        mapping = rte_mbuf_data_dma_addr(m_seg);
                        QEDE_BD_SET_ADDR_LEN(bd3, mapping, m_seg->data_len);
-                       PMD_TX_LOG(DEBUG, txq, "BD3 len %04x\n",
+                       PMD_TX_LOG(DEBUG, txq, "BD3 len %04x",
                                   m_seg->data_len);
                } else {
                        tx_bd = (struct eth_tx_bd *)
@@ -1140,7 +1140,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
                        memset(tx_bd, 0, sizeof(*tx_bd));
                        mapping = rte_mbuf_data_dma_addr(m_seg);
                        QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
-                       PMD_TX_LOG(DEBUG, txq, "BD len %04x\n",
+                       PMD_TX_LOG(DEBUG, txq, "BD len %04x",
                                   m_seg->data_len);
                }
                nb_segs++;
@@ -1171,7 +1171,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id];
 
        if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
-               PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
+               PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
                           nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
                (void)qede_process_tx_compl(edev, txq);
        }
@@ -1179,7 +1179,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail /
                        ETH_TX_MAX_BDS_PER_NON_LSO_PACKET));
        if (unlikely(nb_tx_pkts == 0)) {
-               PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
+               PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u",
                           nb_pkts, txq->nb_tx_avail);
                return 0;
        }
@@ -1200,10 +1200,10 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                /* Map MBUF linear data for DMA and set in the first BD */
                QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
                                     mbuf->data_len);
-               PMD_TX_LOG(INFO, txq, "BD1 len %04x\n", mbuf->data_len);
+               PMD_TX_LOG(INFO, txq, "BD1 len %04x", mbuf->data_len);
 
                if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type)) {
-                       PMD_TX_LOG(INFO, txq, "Tx tunnel packet\n");
+                       PMD_TX_LOG(INFO, txq, "Tx tunnel packet");
                        /* First indicate its a tunnel pkt */
                        bd1->data.bd_flags.bitfields |=
                                ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
@@ -1219,7 +1219,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                        /* Outer IP checksum offload */
                        if (mbuf->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
-                               PMD_TX_LOG(INFO, txq, "OuterIP csum offload\n");
+                               PMD_TX_LOG(INFO, txq, "OuterIP csum offload");
                                bd1->data.bd_flags.bitfields |=
                                        ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
                                        ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
@@ -1233,7 +1233,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                /* Descriptor based VLAN insertion */
                if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
-                       PMD_TX_LOG(INFO, txq, "Insert VLAN 0x%x\n",
+                       PMD_TX_LOG(INFO, txq, "Insert VLAN 0x%x",
                                   mbuf->vlan_tci);
                        bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
                        bd1->data.bd_flags.bitfields |=
@@ -1242,14 +1242,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                /* Offload the IP checksum in the hardware */
                if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
-                       PMD_TX_LOG(INFO, txq, "IP csum offload\n");
+                       PMD_TX_LOG(INFO, txq, "IP csum offload");
                        bd1->data.bd_flags.bitfields |=
                            1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
                }
 
                /* L4 checksum offload (tcp or udp) */
                if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-                       PMD_TX_LOG(INFO, txq, "L4 csum offload\n");
+                       PMD_TX_LOG(INFO, txq, "L4 csum offload");
                        bd1->data.bd_flags.bitfields |=
                            1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
                        /* IPv6 + extn. -> later */
@@ -1267,7 +1267,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
                nb_pkt_sent++;
                txq->xmit_pkts++;
-               PMD_TX_LOG(INFO, txq, "nbds = %d pkt_len = %04x\n",
+               PMD_TX_LOG(INFO, txq, "nbds = %d pkt_len = %04x",
                           bd1->data.nbds, mbuf->pkt_len);
        }
 
@@ -1281,7 +1281,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        /* Check again for Tx completions */
        (void)qede_process_tx_compl(edev, txq);
 
-       PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
+       PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d",
                   nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
 
        return nb_pkt_sent;
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index fe6de6f..f717faa 100644
@@ -104,7 +104,7 @@ sfc_ev_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
                        evq->exception = B_TRUE;
                        sfc_err(evq->sa,
                                "EVQ %u RxQ %u invalid RX abort "
-                               "(id=%#x size=%u flags=%#x); needs restart\n",
+                               "(id=%#x size=%u flags=%#x); needs restart",
                                evq->evq_index, sfc_rxq_sw_index(rxq),
                                id, size, flags);
                        goto done;
@@ -119,7 +119,7 @@ sfc_ev_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
 
                sfc_err(evq->sa,
                        "EVQ %u RxQ %u completion out of order "
-                       "(id=%#x delta=%u flags=%#x); needs restart\n",
+                       "(id=%#x delta=%u flags=%#x); needs restart",
                        evq->evq_index, sfc_rxq_sw_index(rxq), id, delta,
                        flags);
 
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 68dde08..d1ff234 100644
@@ -630,7 +630,7 @@ virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
        int ret;
 
        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-               PMD_INIT_LOG(INFO, "host does not support rx control\n");
+               PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }
 
@@ -653,7 +653,7 @@ virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
        int ret;
 
        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-               PMD_INIT_LOG(INFO, "host does not support rx control\n");
+               PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }
 
@@ -676,7 +676,7 @@ virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
        int ret;
 
        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-               PMD_INIT_LOG(INFO, "host does not support rx control\n");
+               PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }
 
@@ -699,7 +699,7 @@ virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
        int ret;
 
        if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-               PMD_INIT_LOG(INFO, "host does not support rx control\n");
+               PMD_INIT_LOG(INFO, "host does not support rx control");
                return;
        }
 
@@ -723,7 +723,7 @@ virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        uint32_t frame_size = mtu + ether_hdr_len;
 
        if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
-               PMD_INIT_LOG(ERR, "MTU should be between %d and %d\n",
+               PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
                        ETHER_MIN_MTU, VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
                return -EINVAL;
        }
@@ -1233,7 +1233,7 @@ virtio_queues_bind_intr(struct rte_eth_dev *dev)
        uint32_t i;
        struct virtio_hw *hw = dev->data->dev_private;
 
-       PMD_INIT_LOG(INFO, "queue/interrupt binding\n");
+       PMD_INIT_LOG(INFO, "queue/interrupt binding");
        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
                dev->intr_handle->intr_vec[i] = i + 1;
                if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
@@ -1252,7 +1252,7 @@ virtio_queues_unbind_intr(struct rte_eth_dev *dev)
        uint32_t i;
        struct virtio_hw *hw = dev->data->dev_private;
 
-       PMD_INIT_LOG(INFO, "queue/interrupt unbinding\n");
+       PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
        for (i = 0; i < dev->data->nb_rx_queues; ++i)
                VTPCI_OPS(hw)->set_queue_irq(hw,
                                             hw->vqs[i * VTNET_CQ],
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 594b6ff..cab6e8f 100644
@@ -133,7 +133,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
                cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
 
                if (unlikely(cookie == NULL)) {
-                       PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
+                       PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
                                vq->vq_used_cons_idx);
                        break;
                }
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 6617bc8..21ed00d 100644
@@ -62,7 +62,7 @@ virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
         */
        callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
        if (callfd < 0) {
-               PMD_DRV_LOG(ERR, "callfd error, %s\n", strerror(errno));
+               PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
                return -1;
        }
        file.index = queue_sel;
@@ -105,7 +105,7 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
         */
        kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
        if (kickfd < 0) {
-               PMD_DRV_LOG(ERR, "kickfd error, %s\n", strerror(errno));
+               PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
                return -1;
        }
        file.index = queue_sel;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index e544acc..0b226ac 100644
@@ -82,7 +82,7 @@ virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
                for (i = 0; i < ETHER_ADDR_LEN; ++i)
                        dev->mac_addr[i] = ((const uint8_t *)src)[i];
        else
-               PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d\n",
+               PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
                            offset, length);
 }
 
@@ -212,7 +212,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
        }
 
        if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
-               PMD_DRV_LOG(ERR, "failed to kick backend: %s\n",
+               PMD_DRV_LOG(ERR, "failed to kick backend: %s",
                            strerror(errno));
 }
 
@@ -370,7 +370,7 @@ virtio_user_pmd_probe(const char *name, const char *params)
                        goto end;
                }
        } else {
-               PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user\n",
+               PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
                          VIRTIO_USER_ARG_QUEUE_SIZE);
                goto end;
        }
@@ -461,7 +461,7 @@ virtio_user_pmd_remove(const char *name)
        if (!name)
                return -EINVAL;
 
-       PMD_DRV_LOG(INFO, "Un-Initializing %s\n", name);
+       PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
        eth_dev = rte_eth_dev_allocated(name);
        if (!eth_dev)
                return -ENODEV;