int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
+int mrvl_logtype;
+
struct mrvl_ifnames {
const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
int idx;
ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
return ret;
}
params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
ret = pp2_hif_init(&params, &hifs[core_id]);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
return ret;
}
ret = mrvl_init_hif(core_id);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
goto out;
}
mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
{
if (rss_conf->rss_key)
- RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n");
+ MRVL_LOG(WARNING, "Changing hash key is not supported");
if (rss_conf->rss_hf == 0) {
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
- RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
+ MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
dev->data->dev_conf.rxmode.mq_mode);
return -EINVAL;
}
if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
- RTE_LOG(INFO, PMD,
- "L2 CRC stripping is always enabled in hw\n");
+ MRVL_LOG(INFO,
+ "L2 CRC stripping is always enabled in hw");
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
if (dev->data->dev_conf.rxmode.split_hdr_size) {
- RTE_LOG(INFO, PMD, "Split headers not supported\n");
+ MRVL_LOG(INFO, "Split headers not supported");
return -EINVAL;
}
if (dev->data->nb_rx_queues == 1 &&
dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
- RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n");
+ MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
return 0;
/* passing 1 enables given tx queue */
ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to start txq %d\n", queue_id);
+ MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
return ret;
}
/* passing 0 disables given tx queue */
ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to stop txq %d\n", queue_id);
+ MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
return ret;
}
priv->bpool_init_size += buffs_to_add;
ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n");
+ MRVL_LOG(ERR, "Failed to add buffers to bpool");
}
/*
ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to init ppio\n");
+ MRVL_LOG(ERR, "Failed to init ppio");
return ret;
}
if (!priv->uc_mc_flushed) {
ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
if (ret) {
- RTE_LOG(ERR, PMD,
- "Failed to flush uc/mc filter list\n");
+ MRVL_LOG(ERR,
+ "Failed to flush uc/mc filter list");
goto out;
}
priv->uc_mc_flushed = 1;
if (!priv->vlan_flushed) {
ret = pp2_ppio_flush_vlan(priv->ppio);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to flush vlan list\n");
+ MRVL_LOG(ERR, "Failed to flush vlan list");
/*
* TODO
* once pp2_ppio_flush_vlan() is supported jump to out
if (mrvl_qos_cfg) {
ret = mrvl_start_qos_mapping(priv);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n");
+ MRVL_LOG(ERR, "Failed to setup QoS mapping");
goto out;
}
}
ret = mrvl_dev_set_link_up(dev);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to set link up\n");
+ MRVL_LOG(ERR, "Failed to set link up");
goto out;
}
return 0;
out:
- RTE_LOG(ERR, PMD, "Failed to start device\n");
+ MRVL_LOG(ERR, "Failed to start device");
pp2_ppio_deinit(priv->ppio);
return ret;
}
{
int i;
- RTE_LOG(INFO, PMD, "Flushing rx queues\n");
+ MRVL_LOG(INFO, "Flushing rx queues");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
int ret, num;
int i, j;
struct mrvl_txq *txq;
- RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
+ MRVL_LOG(INFO, "Flushing tx shadow queues");
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = (struct mrvl_txq *)dev->data->tx_queues[i];
ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n");
+ MRVL_LOG(ERR, "Failed to get bpool buffers number");
return;
}
ret = pp2_ppio_set_promisc(priv->ppio, 1);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
+ MRVL_LOG(ERR, "Failed to enable promiscuous mode");
}
/**
ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
if (ret)
- RTE_LOG(ERR, PMD, "Failed enable all-multicast mode\n");
+ MRVL_LOG(ERR, "Failed enable all-multicast mode");
}
/**
ret = pp2_ppio_set_promisc(priv->ppio, 0);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
+ MRVL_LOG(ERR, "Failed to disable promiscuous mode");
}
/**
ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
+ MRVL_LOG(ERR, "Failed to disable all-multicast mode");
}
/**
if (ret) {
ether_format_addr(buf, sizeof(buf),
&dev->data->mac_addrs[index]);
- RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf);
+ MRVL_LOG(ERR, "Failed to remove mac %s", buf);
}
}
ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
if (ret) {
ether_format_addr(buf, sizeof(buf), mac_addr);
- RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf);
+ MRVL_LOG(ERR, "Failed to add mac %s", buf);
return -1;
}
if (ret) {
char buf[ETHER_ADDR_FMT_SIZE];
ether_format_addr(buf, sizeof(buf), mac_addr);
- RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf);
+ MRVL_LOG(ERR, "Failed to set mac to %s", buf);
}
return ret;
idx = rxq->queue_id;
if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
- RTE_LOG(ERR, PMD,
- "rx queue %d stats out of range (0 - %d)\n",
+ MRVL_LOG(ERR,
+ "rx queue %d stats out of range (0 - %d)",
idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
continue;
}
priv->rxq_map[idx].inq,
&rx_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD,
- "Failed to update rx queue %d stats\n", idx);
+ MRVL_LOG(ERR,
+ "Failed to update rx queue %d stats", idx);
break;
}
idx = txq->queue_id;
if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
- RTE_LOG(ERR, PMD,
- "tx queue %d stats out of range (0 - %d)\n",
+ MRVL_LOG(ERR,
+ "tx queue %d stats out of range (0 - %d)",
idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
}
ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
&tx_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD,
- "Failed to update tx queue %d stats\n", idx);
+ MRVL_LOG(ERR,
+ "Failed to update tx queue %d stats", idx);
break;
}
ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD, "Failed to update port statistics\n");
+ MRVL_LOG(ERR, "Failed to update port statistics");
return ret;
}
for (i = 0; i < num; i++) {
if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
!= cookie_addr_high) {
- RTE_LOG(ERR, PMD,
- "mbuf virtual addr high 0x%lx out of range\n",
+ MRVL_LOG(ERR,
+ "mbuf virtual addr high 0x%lx out of range",
(uint64_t)mbufs[i] >> 32);
goto out;
}
/*
* Unknown TC mapping, mapping will not have a correct queue.
*/
- RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
+ MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
idx, priv->ppio_id);
return -EFAULT;
}
min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
MRVL_PKT_EFFEC_OFFS;
if (min_size < max_rx_pkt_len) {
- RTE_LOG(ERR, PMD,
- "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
+ MRVL_LOG(ERR,
+ "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.",
max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
MRVL_PKT_EFFEC_OFFS,
max_rx_pkt_len);
ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to read rx pause state\n");
+ MRVL_LOG(ERR, "Failed to read rx pause state");
return ret;
}
fc_conf->pause_time ||
fc_conf->mac_ctrl_frame_fwd ||
fc_conf->autoneg) {
- RTE_LOG(ERR, PMD, "Flowctrl parameter is not supported\n");
+ MRVL_LOG(ERR, "Flowctrl parameter is not supported");
return -EINVAL;
}
en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
ret = pp2_ppio_set_rx_pause(priv->ppio, en);
if (ret)
- RTE_LOG(ERR, PMD,
- "Failed to change flowctrl on RX side\n");
+ MRVL_LOG(ERR,
+ "Failed to change flowctrl on RX side");
return ret;
}
*(const void **)arg = &mrvl_flow_ops;
return 0;
default:
- RTE_LOG(WARNING, PMD, "Filter type (%d) not supported",
+ MRVL_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
return -EINVAL;
}
*l4_offset = *l3_offset + MRVL_ARP_LENGTH;
break;
default:
- RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n");
+ MRVL_LOG(DEBUG, "Failed to recognise l3 packet type");
break;
}
packet_type |= RTE_PTYPE_L4_UDP;
break;
default:
- RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n");
+ MRVL_LOG(DEBUG, "Failed to recognise l4 packet type");
break;
}
ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
if (unlikely(ret < 0)) {
- RTE_LOG(ERR, PMD, "Failed to receive packets\n");
+ MRVL_LOG(ERR, "Failed to receive packets");
return 0;
}
mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
(!rx_done && num < q->priv->bpool_init_size))) {
ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
+ MRVL_LOG(ERR, "Failed to fill bpool");
} else if (unlikely(num > q->priv->bpool_max_size)) {
int i;
int pkt_to_remove = num - q->priv->bpool_init_size;
struct rte_mbuf *mbuf;
struct pp2_buff_inf buff;
- RTE_LOG(DEBUG, PMD,
- "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
+ MRVL_LOG(DEBUG,
+ "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)",
bpool->pp2_id, q->priv->ppio->port_id,
bpool->id, pkt_to_remove, num,
q->priv->bpool_init_size);
for (i = 0; i < nb_done; i++) {
entry = &sq->ent[sq->tail + num];
if (unlikely(!entry->buff.addr)) {
- RTE_LOG(ERR, PMD,
- "Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
+ MRVL_LOG(ERR,
+ "Shadow memory @%d: cookie(%lx), pa(%lx)!",
sq->tail, (u64)entry->buff.cookie,
(u64)entry->buff.addr);
skip_bufs = 1;
sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
if (unlikely(nb_pkts > sq_free_size)) {
- RTE_LOG(DEBUG, PMD,
- "No room in shadow queue for %d packets! %d packets will be sent.\n",
+ MRVL_LOG(DEBUG,
+ "No room in shadow queue for %d packets! %d packets will be sent.",
nb_pkts, sq_free_size);
nb_pkts = sq_free_size;
}
rte_zmalloc("mac_addrs",
ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
if (!eth_dev->data->mac_addrs) {
- RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
+ MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
ret = -ENOMEM;
goto out_free_priv;
}
*/
if (!mrvl_qos_cfg) {
cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
- RTE_LOG(INFO, PMD, "Parsing config file!\n");
+ MRVL_LOG(INFO, "Parsing config file!");
if (cfgnum > 1) {
- RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
+ MRVL_LOG(ERR, "Cannot handle more than one config file!");
goto out_free_kvlist;
} else if (cfgnum == 1) {
rte_kvargs_process(kvlist, MRVL_CFG_ARG,
if (mrvl_dev_num)
goto init_devices;
- RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n");
+ MRVL_LOG(INFO, "Perform MUSDK initializations");
/*
* ret == -EEXIST is correct, it means DMA
* has been already initialized (by another PMD).
if (ret != -EEXIST)
goto out_free_kvlist;
else
- RTE_LOG(INFO, PMD,
- "DMA memory has been already initialized by a different driver.\n");
+ MRVL_LOG(INFO,
+ "DMA memory has been already initialized by a different driver.");
}
ret = mrvl_init_pp2();
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to init PP!\n");
+ MRVL_LOG(ERR, "Failed to init PP!");
goto out_deinit_dma;
}
init_devices:
for (i = 0; i < ifnum; i++) {
- RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]);
+ MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
if (ret)
goto out_cleanup;
if (!name)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Removing %s\n", name);
+ MRVL_LOG(INFO, "Removing %s", name);
RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
char ifname[RTE_ETH_NAME_MAX_LEN];
}
if (mrvl_dev_num == 0) {
- RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n");
+ MRVL_LOG(INFO, "Perform MUSDK deinit");
mrvl_deinit_hifs();
mrvl_deinit_pp2();
mv_sys_dma_mem_destroy();
RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
+
+RTE_INIT(mrvl_init_log);
+static void
+mrvl_init_log(void)
+{
+ mrvl_logtype = rte_log_register("pmd.net.mvpp2");
+ if (mrvl_logtype >= 0)
+ rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE);
+}
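/*
 * Hedged sketch, not part of this patch: the MRVL_LOG() macro used by the
 * converted call sites is assumed to be defined along these lines in
 * mrvl_ethdev.h. It routes messages to the dynamic "pmd.net.mvpp2" logtype
 * registered above and appends the trailing newline the call sites no
 * longer carry.
 */
#define MRVL_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, mrvl_logtype, "%s(): " fmt "\n", \
		__func__, ##args)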
}
if (mask->type) {
- RTE_LOG(WARNING, PMD, "eth type mask is ignored\n");
+ MRVL_LOG(WARNING, "eth type mask is ignored");
ret = mrvl_parse_type(spec, mask, flow);
if (ret)
goto out;
m = rte_be_to_cpu_16(mask->tci);
if (m & MRVL_VLAN_ID_MASK) {
- RTE_LOG(WARNING, PMD, "vlan id mask is ignored\n");
+ MRVL_LOG(WARNING, "vlan id mask is ignored");
ret = mrvl_parse_vlan_id(spec, mask, flow);
if (ret)
goto out;
}
if (m & MRVL_VLAN_PRI_MASK) {
- RTE_LOG(WARNING, PMD, "vlan pri mask is ignored\n");
+ MRVL_LOG(WARNING, "vlan pri mask is ignored");
ret = mrvl_parse_vlan_pri(spec, mask, flow);
if (ret)
goto out;
if (flow->pattern & F_TYPE) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "VLAN TPID matching is not supported\n");
+ "VLAN TPID matching is not supported");
return -rte_errno;
}
if (mask->inner_type) {
.type = mask->inner_type,
};
- RTE_LOG(WARNING, PMD, "inner eth type mask is ignored\n");
+ MRVL_LOG(WARNING, "inner eth type mask is ignored");
ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
if (ret)
goto out;
}
if (mask->hdr.next_proto_id) {
- RTE_LOG(WARNING, PMD, "next proto id mask is ignored\n");
+ MRVL_LOG(WARNING, "next proto id mask is ignored");
ret = mrvl_parse_ip4_proto(spec, mask, flow);
if (ret)
goto out;
}
if (mask->hdr.proto) {
- RTE_LOG(WARNING, PMD, "next header mask is ignored\n");
+ MRVL_LOG(WARNING, "next header mask is ignored");
ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
if (ret)
goto out;
}
if (mask->hdr.src_port) {
- RTE_LOG(WARNING, PMD, "tcp sport mask is ignored\n");
+ MRVL_LOG(WARNING, "tcp sport mask is ignored");
ret = mrvl_parse_tcp_sport(spec, mask, flow);
if (ret)
goto out;
}
if (mask->hdr.dst_port) {
- RTE_LOG(WARNING, PMD, "tcp dport mask is ignored\n");
+ MRVL_LOG(WARNING, "tcp dport mask is ignored");
ret = mrvl_parse_tcp_dport(spec, mask, flow);
if (ret)
goto out;
}
if (mask->hdr.src_port) {
- RTE_LOG(WARNING, PMD, "udp sport mask is ignored\n");
+ MRVL_LOG(WARNING, "udp sport mask is ignored");
ret = mrvl_parse_udp_sport(spec, mask, flow);
if (ret)
goto out;
}
if (mask->hdr.dst_port) {
- RTE_LOG(WARNING, PMD, "udp dport mask is ignored\n");
+ MRVL_LOG(WARNING, "udp dport mask is ignored");
ret = mrvl_parse_udp_dport(spec, mask, flow);
if (ret)
goto out;
* Unknown TC mapping, mapping will not have
* a correct queue.
*/
- RTE_LOG(ERR, PMD,
- "Unknown TC mapping for queue %hu eth%hhu\n",
+ MRVL_LOG(ERR,
+ "Unknown TC mapping for queue %hu eth%hhu",
q->index, priv->ppio_id);
rte_flow_error_set(error, EFAULT,
return -rte_errno;
}
- RTE_LOG(DEBUG, PMD,
- "Action: Assign packets to queue %d, tc:%d, q:%d\n",
+ MRVL_LOG(DEBUG,
+ "Action: Assign packets to queue %d, tc:%d, q:%d",
q->index, priv->rxq_map[q->index].tc,
priv->rxq_map[q->index].inq);
memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
- RTE_LOG(INFO, PMD, "Setting cls search engine type to %s\n",
+ MRVL_LOG(INFO, "Setting cls search engine type to %s",
priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
"exact" : "maskable");
priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
cfg->port[port].outq[outq].sched_mode =
PP2_PPIO_SCHED_M_WRR;
} else {
- RTE_LOG(ERR, PMD, "Unknown token: %s\n", entry);
+ MRVL_LOG(ERR, "Unknown token: %s", entry);
return -1;
}
}
* global port rate limiting has priority.
*/
if (cfg->port[port].rate_limit_enable) {
- RTE_LOG(WARNING, PMD, "Port %d rate limiting already enabled\n",
+ MRVL_LOG(WARNING, "Port %d rate limiting already enabled",
port);
return 0;
}
RTE_DIM(cfg->port[port].tc[tc].inq),
MRVL_PP2_RXQ_MAX);
if (n < 0) {
- RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
n, entry);
return n;
}
RTE_DIM(cfg->port[port].tc[tc].pcp),
MAX_PCP);
if (n < 0) {
- RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
n, entry);
return n;
}
RTE_DIM(cfg->port[port].tc[tc].dscp),
MAX_DSCP);
if (n < 0) {
- RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
n, entry);
return n;
}
sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_RED))) {
cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_RED;
} else {
- RTE_LOG(ERR, PMD, "Error while parsing: %s\n", entry);
+ MRVL_LOG(ERR, "Error while parsing: %s", entry);
return -1;
}
}
if (n == 0) {
/* This is weird, but not bad. */
- RTE_LOG(WARNING, PMD, "Empty configuration file?\n");
+ MRVL_LOG(WARNING, "Empty configuration file?");
return 0;
}
return -1;
(*cfg)->port[n].default_tc = (uint8_t)val;
} else {
- RTE_LOG(ERR, PMD,
- "Default Traffic Class required in custom configuration!\n");
+ MRVL_LOG(ERR,
+ "Default Traffic Class required in custom configuration!");
return -1;
}
sizeof(MRVL_TOK_PLCR_UNIT_PACKETS))) {
unit = PP2_CLS_PLCR_PACKETS_TOKEN_UNIT;
} else {
- RTE_LOG(ERR, PMD, "Unknown token: %s\n",
+ MRVL_LOG(ERR, "Unknown token: %s",
entry);
return -1;
}
sizeof(MRVL_TOK_PLCR_COLOR_AWARE))) {
mode = PP2_CLS_PLCR_COLOR_AWARE_MODE;
} else {
- RTE_LOG(ERR, PMD,
- "Error in parsing: %s\n",
+ MRVL_LOG(ERR,
+ "Error in parsing: %s",
entry);
return -1;
}
ret = pp2_cls_plcr_init(params, &priv->policer);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to setup %s\n", match);
+ MRVL_LOG(ERR, "Failed to setup %s", match);
return -1;
}
for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
if (port_cfg->tc[tc].pcps > RTE_DIM(port_cfg->tc[0].pcp)) {
/* Better safe than sorry. */
- RTE_LOG(ERR, PMD,
- "Too many PCPs configured in TC %zu!\n", tc);
+ MRVL_LOG(ERR,
+ "Too many PCPs configured in TC %zu!", tc);
return -1;
}
for (i = 0; i < port_cfg->tc[tc].pcps; ++i) {
for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
if (port_cfg->tc[tc].dscps > RTE_DIM(port_cfg->tc[0].dscp)) {
/* Better safe than sorry. */
- RTE_LOG(ERR, PMD,
- "Too many DSCPs configured in TC %zu!\n", tc);
+ MRVL_LOG(ERR,
+ "Too many DSCPs configured in TC %zu!", tc);
return -1;
}
for (i = 0; i < port_cfg->tc[tc].dscps; ++i) {
for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
if (port_cfg->tc[tc].inqs > RTE_DIM(port_cfg->tc[0].inq)) {
/* Overflow. */
- RTE_LOG(ERR, PMD,
- "Too many RX queues configured per TC %zu!\n",
+ MRVL_LOG(ERR,
+ "Too many RX queues configured per TC %zu!",
tc);
return -1;
}
uint8_t idx = port_cfg->tc[tc].inq[i];
if (idx > RTE_DIM(priv->rxq_map)) {
- RTE_LOG(ERR, PMD, "Bad queue index %d!\n", idx);
+ MRVL_LOG(ERR, "Bad queue index %d!", idx);
return -1;
}
size_t i;
if (priv->ppio == NULL) {
- RTE_LOG(ERR, PMD, "ppio must not be NULL here!\n");
+ MRVL_LOG(ERR, "ppio must not be NULL here!");
return -1;
}