#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"
-#define MRVL_BURST_SIZE 64
-
#define MRVL_ARP_LENGTH 28
#define MRVL_COOKIE_ADDR_INVALID ~0ULL
#define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000
/** Port Rx offload capabilities */
-#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
- DEV_RX_OFFLOAD_CHECKSUM)
+#define MRVL_RX_OFFLOADS (RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
+ RTE_ETH_RX_OFFLOAD_CHECKSUM)
-/** Port Tx offloads capabilities */
+/** Port Tx offload capabilities */
-#define MRVL_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM)
+#define MRVL_TX_OFFLOAD_CHECKSUM (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
#define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
- DEV_TX_OFFLOAD_MULTI_SEGS)
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
-#define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
- PKT_TX_TCP_CKSUM | \
- PKT_TX_UDP_CKSUM)
+#define MRVL_TX_PKT_OFFLOADS (RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_TCP_CKSUM | \
+ RTE_MBUF_F_TX_UDP_CKSUM)
static const char * const valid_args[] = {
MRVL_IFACE_NAME_ARG,
static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
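+/* Dummy short bpools, one per packet processor, registered at pp2 init
+ * time; see mrvl_pp2_fixup_init().
+ */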
+static int dummy_pool_id[PP2_NUM_PKT_PROC];
+struct pp2_bpool *dummy_pool[PP2_NUM_PKT_PROC] = {0};
struct mrvl_ifnames {
const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int mrvl_promiscuous_enable(struct rte_eth_dev *dev);
static int mrvl_allmulticast_enable(struct rte_eth_dev *dev);
+static int
+mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
#define MRVL_XSTATS_TBL_ENTRY(name) { \
#name, offsetof(struct pp2_ppio_statistics, name), \
MRVL_XSTATS_TBL_ENTRY(tx_errors)
};
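+/**
+ * Reserve a bit in an allocation bitmap.
+ *
+ * Reserves the bit just above the highest bit currently set, so *bitmap
+ * must be non-zero (__builtin_clz(0) is undefined).
+ *
+ * @param bitmap
+ * Pointer to the allocation bitmap.
+ * @param max
+ * Number of usable bits.
+ *
+ * @return
+ * Index of the reserved bit on success, negative value otherwise.
+ */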
+static inline int
+mrvl_reserve_bit(int *bitmap, int max)
+{
+ int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);
+
+ if (n >= max)
+ return -1;
+
+ *bitmap |= 1 << n;
+
+ return n;
+}
+
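+/**
+ * Register a minimal dummy short bpool on each packet processor.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */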
+static int
+mrvl_pp2_fixup_init(void)
+{
+ struct pp2_bpool_params bpool_params;
+ char name[15];
+ int err, i;
+
+ memset(dummy_pool, 0, sizeof(dummy_pool));
+ for (i = 0; i < pp2_get_num_inst(); i++) {
+ dummy_pool_id[i] = mrvl_reserve_bit(&used_bpools[i],
+ PP2_BPOOL_NUM_POOLS);
+ if (dummy_pool_id[i] < 0) {
+ MRVL_LOG(ERR, "Can't find free pool\n");
+ return -1;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, sizeof(name), "pool-%d:%d", i, dummy_pool_id[i]);
+ memset(&bpool_params, 0, sizeof(bpool_params));
+ bpool_params.match = name;
+ bpool_params.buff_len = MRVL_PKT_OFFS;
+ bpool_params.dummy_short_pool = 1;
+ err = pp2_bpool_init(&bpool_params, &dummy_pool[i]);
+ if (err != 0 || !dummy_pool[i]) {
+ MRVL_LOG(ERR, "BPool init failed!\n");
+ used_bpools[i] &= ~(1 << dummy_pool_id[i]);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Initialize packet processor.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_init_pp2(void)
+{
+ struct pp2_init_params init_params;
+ int err;
+
+ memset(&init_params, 0, sizeof(init_params));
+ init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
+ init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
+ init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
+ if (mrvl_cfg && mrvl_cfg->pp2_cfg.prs_udfs.num_udfs)
+ memcpy(&init_params.prs_udfs, &mrvl_cfg->pp2_cfg.prs_udfs,
+ sizeof(struct pp2_parse_udfs));
+ err = pp2_init(&init_params);
+ if (err != 0) {
+ MRVL_LOG(ERR, "PP2 init failed");
+ return -1;
+ }
+
+ err = mrvl_pp2_fixup_init();
+ if (err != 0) {
+ MRVL_LOG(ERR, "PP2 fixup init failed");
+ return -1;
+ }
+
+ return 0;
+}
+
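+/**
+ * Deinitialize the dummy bpools registered by mrvl_pp2_fixup_init().
+ */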
+static void
+mrvl_pp2_fixup_deinit(void)
+{
+ int i;
+
+ for (i = 0; i < PP2_NUM_PKT_PROC; i++) {
+ if (!dummy_pool[i])
+ continue;
+ pp2_bpool_deinit(dummy_pool[i]);
+ used_bpools[i] &= ~(1 << dummy_pool_id[i]);
+ }
+}
+
+/**
+ * Deinitialize packet processor.
+ */
+static void
+mrvl_deinit_pp2(void)
+{
+ mrvl_pp2_fixup_deinit();
+ pp2_deinit();
+}
+
static inline void
mrvl_fill_shadowq(struct mrvl_shadow_txq *sq, struct rte_mbuf *buf)
{
sq->size++;
}
+/**
+ * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
+ */
+static void
+mrvl_deinit_hifs(void)
+{
+ int i;
+
+ for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
+ if (hifs[i])
+ pp2_hif_deinit(hifs[i]);
+ }
+ used_hifs = MRVL_MUSDK_HIFS_RESERVED;
+ memset(hifs, 0, sizeof(hifs));
+}
+
static inline void
mrvl_fill_desc(struct pp2_ppio_desc *desc, struct rte_mbuf *buf)
{
return size;
}
-static inline int
-mrvl_reserve_bit(int *bitmap, int max)
-{
- int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);
-
- if (n >= max)
- return -1;
-
- *bitmap |= 1 << n;
-
- return n;
-}
-
static int
mrvl_init_hif(int core_id)
{
if (rss_conf->rss_hf == 0) {
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
- } else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+ } else if (rss_conf->rss_hf & RTE_ETH_RSS_IPV4) {
priv->ppio_params.inqs_params.hash_type =
PP2_PPIO_HASH_T_2_TUPLE;
- } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ } else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) {
priv->ppio_params.inqs_params.hash_type =
PP2_PPIO_HASH_T_5_TUPLE;
priv->rss_hf_tcp = 1;
- } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ } else if (rss_conf->rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) {
priv->ppio_params.inqs_params.hash_type =
PP2_PPIO_HASH_T_5_TUPLE;
priv->rss_hf_tcp = 0;
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
- dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
+ if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_NONE &&
+ dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_RSS) {
MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
dev->data->dev_conf.rxmode.mq_mode);
return -EINVAL;
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
- MRVL_PP2_ETH_HDRS_LEN;
+ if (dev->data->dev_conf.rxmode.mtu > priv->max_mtu) {
+ MRVL_LOG(ERR, "MTU %u is larger than max_mtu %u\n",
+ dev->data->dev_conf.rxmode.mtu,
+ priv->max_mtu);
+ return -EINVAL;
+ }
- if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ if (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
priv->multiseg = 1;
ret = mrvl_configure_rxqs(priv, dev->data->port_id,
return ret;
if (dev->data->nb_rx_queues == 1 &&
- dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
-
+ priv->configured = 1;
return 0;
}
- return mrvl_configure_rss(priv,
+ ret = mrvl_configure_rss(priv,
&dev->data->dev_conf.rx_adv_conf.rss_conf);
+ if (ret < 0)
+ return ret;
+
+ priv->configured = 1;
+
+ return 0;
}
/**
if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) {
mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS;
mtu = MRVL_PP2_MRU_TO_MTU(mru);
- MRVL_LOG(WARNING, "MTU too big, max MTU possible limitted "
+ MRVL_LOG(WARNING, "MTU too big, max MTU possible limited "
"by current mbuf size: %u. Set MTU to %u, MRU to %u",
mbuf_data_size, mtu, mru);
}
return -EINVAL;
}
- dev->data->mtu = mtu;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
-
if (!priv->ppio)
return 0;
int ret;
if (!priv->ppio) {
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
return ret;
}
- dev->data->dev_link.link_status = ETH_LINK_UP;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
return 0;
}
int ret;
if (!priv->ppio) {
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
ret = pp2_ppio_disable(priv->ppio);
if (ret)
return ret;
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
return 0;
}
priv->pp_id, priv->ppio_id);
priv->ppio_params.match = match;
priv->ppio_params.eth_start_hdr = PP2_PPIO_HDR_ETH;
- if (mrvl_cfg)
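+ /* Defaults; overridden below if a configuration file was provided. */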
+ priv->forward_bad_frames = 0;
+ priv->fill_bpool_buffs = MRVL_BURST_SIZE;
+
+ if (mrvl_cfg) {
priv->ppio_params.eth_start_hdr =
mrvl_cfg->port[dev->data->port_id].eth_start_hdr;
+ priv->forward_bad_frames =
+ mrvl_cfg->port[dev->data->port_id].forward_bad_frames;
+ priv->fill_bpool_buffs =
+ mrvl_cfg->port[dev->data->port_id].fill_bpool_buffs;
+ }
/*
* Calculate the minimum bpool size for refill feature as follows:
if (dev->data->all_multicast == 1)
mrvl_allmulticast_enable(dev);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
ret = mrvl_populate_vlan_table(dev, 1);
if (ret) {
MRVL_LOG(ERR, "Failed to populate VLAN table");
/* For default QoS config, don't start classifier. */
if (mrvl_cfg &&
- mrvl_cfg->port[dev->data->port_id].use_global_defaults == 0) {
+ mrvl_cfg->port[dev->data->port_id].use_qos_global_defaults == 0) {
ret = mrvl_start_qos_mapping(priv);
if (ret) {
MRVL_LOG(ERR, "Failed to setup QoS mapping");
if (dev->data->promiscuous == 1)
mrvl_promiscuous_enable(dev);
- if (dev->data->dev_link.link_status == ETH_LINK_UP) {
+ if (priv->flow_ctrl) {
+ ret = mrvl_flow_ctrl_set(dev, &priv->fc_conf);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to configure flow control");
+ goto out;
+ }
+ priv->flow_ctrl = 0;
+ }
+
+ if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
ret = mrvl_dev_set_link_up(dev);
if (ret) {
MRVL_LOG(ERR, "Failed to set link up");
- dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
goto out;
}
}
switch (ethtool_cmd_speed(&edata)) {
case SPEED_10:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10M;
break;
case SPEED_100:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_100M;
break;
case SPEED_1000:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_1G;
+ break;
+ case SPEED_2500:
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
break;
case SPEED_10000:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_10G;
break;
default:
- dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+ dev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
}
- dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
- ETH_LINK_HALF_DUPLEX;
- dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
- ETH_LINK_FIXED;
+ dev->data->dev_link.link_duplex = edata.duplex ? RTE_ETH_LINK_FULL_DUPLEX :
+ RTE_ETH_LINK_HALF_DUPLEX;
+ dev->data->dev_link.link_autoneg = edata.autoneg ? RTE_ETH_LINK_AUTONEG :
+ RTE_ETH_LINK_FIXED;
pp2_ppio_get_link_state(priv->ppio, &link_up);
- dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+ dev->data->dev_link.link_status = link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
return 0;
}
* Info structure output buffer.
*/
static int
-mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
+mrvl_dev_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *info)
{
- info->speed_capa = ETH_LINK_SPEED_10M |
- ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G;
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
+
+ info->speed_capa = RTE_ETH_LINK_SPEED_10M |
+ RTE_ETH_LINK_SPEED_100M |
+ RTE_ETH_LINK_SPEED_1G |
+ RTE_ETH_LINK_SPEED_2_5G |
+ RTE_ETH_LINK_SPEED_10G;
info->max_rx_queues = MRVL_PP2_RXQ_MAX;
info->max_tx_queues = MRVL_PP2_TXQ_MAX;
info->tx_offload_capa = MRVL_TX_OFFLOADS;
info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
- info->flow_type_rss_offloads = ETH_RSS_IPV4 |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_UDP;
+ info->flow_type_rss_offloads = RTE_ETH_RSS_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP;
/* By default packets are dropped if no descriptors are available */
info->default_rxconf.rx_drop_en = 1;
info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
+ info->max_mtu = priv->max_mtu;
return 0;
}
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
int ret;
- if (mask & ETH_VLAN_STRIP_MASK)
+ if (mask & RTE_ETH_VLAN_STRIP_MASK) {
MRVL_LOG(ERR, "VLAN stripping is not supported\n");
+ return -ENOTSUP;
+ }
- if (mask & ETH_VLAN_FILTER_MASK) {
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
ret = mrvl_populate_vlan_table(dev, 1);
else
ret = mrvl_populate_vlan_table(dev, 0);
return ret;
}
- if (mask & ETH_VLAN_EXTEND_MASK)
+ if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
MRVL_LOG(ERR, "Extend VLAN not supported\n");
+ return -ENOTSUP;
+ }
return 0;
}
struct mrvl_priv *priv = dev->data->dev_private;
struct mrvl_rxq *rxq;
uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
- uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint32_t max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
int ret, tc, inq;
uint64_t offloads;
return -EFAULT;
}
- frame_size = buf_size - RTE_PKTMBUF_HEADROOM -
- MRVL_PKT_EFFEC_OFFS + RTE_ETHER_CRC_LEN;
- if (frame_size < max_rx_pkt_len) {
+ frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS;
+ if (frame_size < max_rx_pktlen) {
MRVL_LOG(WARNING,
"Mbuf size must be increased to %u bytes to hold up "
"to %u bytes of data.",
- buf_size + max_rx_pkt_len - frame_size,
- max_rx_pkt_len);
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
- MRVL_LOG(INFO, "Setting max rx pkt len to %u",
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ max_rx_pktlen + buf_size - frame_size,
+ max_rx_pktlen);
+ dev->data->mtu = frame_size - RTE_ETHER_HDR_LEN;
+ MRVL_LOG(INFO, "Setting MTU to %u", dev->data->mtu);
}
if (dev->data->rx_queues[idx]) {
rxq->priv = priv;
rxq->mp = mp;
- rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+ rxq->cksum_enabled = offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
rxq->queue_id = idx;
rxq->port_id = dev->data->port_id;
mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
/**
* DPDK callback to release the receive queue.
*
- * @param rxq
- * Generic receive queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Receive queue index.
*/
static void
-mrvl_rx_queue_release(void *rxq)
+mrvl_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct mrvl_rxq *q = rxq;
+ struct mrvl_rxq *q = dev->data->rx_queues[qid];
struct pp2_ppio_tc_params *tc_params;
int i, num, tc, inq;
struct pp2_hif *hif;
/**
* DPDK callback to release the transmit queue.
*
- * @param txq
- * Generic transmit queue pointer.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param qid
+ * Transmit queue index.
*/
static void
-mrvl_tx_queue_release(void *txq)
+mrvl_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- struct mrvl_txq *q = txq;
+ struct mrvl_txq *q = dev->data->tx_queues[qid];
if (!q)
return;
struct mrvl_priv *priv = dev->data->dev_private;
int ret, en;
- if (!priv)
- return -EPERM;
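+ /* Port not started yet; report the stored configuration. */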
+ if (!priv->ppio) {
+ memcpy(fc_conf, &priv->fc_conf, sizeof(struct rte_eth_fc_conf));
+ return 0;
+ }
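+ /* Autoneg cannot be disabled (see mrvl_flow_ctrl_set()). */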
+ fc_conf->autoneg = 1;
ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
if (ret) {
MRVL_LOG(ERR, "Failed to read rx pause state");
return ret;
}
- fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+ fc_conf->mode = en ? RTE_ETH_FC_RX_PAUSE : RTE_ETH_FC_NONE;
ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
if (ret) {
}
if (en) {
- if (fc_conf->mode == RTE_FC_NONE)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ if (fc_conf->mode == RTE_ETH_FC_NONE)
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
}
return 0;
int ret;
int rx_en, tx_en;
- if (!priv)
- return -EPERM;
-
if (fc_conf->high_water ||
fc_conf->low_water ||
fc_conf->pause_time ||
- fc_conf->mac_ctrl_frame_fwd ||
- fc_conf->autoneg) {
+ fc_conf->mac_ctrl_frame_fwd) {
MRVL_LOG(ERR, "Flowctrl parameter is not supported");
return -EINVAL;
}
+ if (fc_conf->autoneg == 0) {
+ MRVL_LOG(ERR, "Flowctrl Autoneg disable is not supported");
+ return -EINVAL;
+ }
+
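+ /* Port not started yet; save the config and apply it at device start. */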
+ if (!priv->ppio) {
+ memcpy(&priv->fc_conf, fc_conf, sizeof(struct rte_eth_fc_conf));
+ priv->flow_ctrl = 1;
+ return 0;
+ }
+
switch (fc_conf->mode) {
- case RTE_FC_FULL:
+ case RTE_ETH_FC_FULL:
rx_en = 1;
tx_en = 1;
break;
- case RTE_FC_TX_PAUSE:
+ case RTE_ETH_FC_TX_PAUSE:
rx_en = 0;
tx_en = 1;
break;
- case RTE_FC_RX_PAUSE:
+ case RTE_ETH_FC_RX_PAUSE:
rx_en = 1;
tx_en = 0;
break;
- case RTE_FC_NONE:
+ case RTE_ETH_FC_NONE:
rx_en = 0;
tx_en = 0;
break;
if (hash_type == PP2_PPIO_HASH_T_NONE)
rss_conf->rss_hf = 0;
else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
- rss_conf->rss_hf = ETH_RSS_IPV4;
+ rss_conf->rss_hf = RTE_ETH_RSS_IPV4;
else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
- rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP;
else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
- rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_conf->rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_UDP;
return 0;
}
*
* @param dev
* Pointer to the device structure.
- * @param filer_type
- * Flow filter type.
- * @param filter_op
- * Flow filter operation.
- * @param arg
+ * @param ops
* Pointer to pass the flow ops.
*
* @return
* 0 on success, negative error value otherwise.
*/
static int
-mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
+mrvl_eth_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &mrvl_flow_ops;
- return 0;
- default:
- MRVL_LOG(WARNING, "Filter type (%d) not supported",
- filter_type);
- return -EINVAL;
- }
+ *ops = &mrvl_flow_ops;
+ return 0;
}
/**
.flow_ctrl_set = mrvl_flow_ctrl_set,
.rss_hash_update = mrvl_rss_hash_update,
.rss_hash_conf_get = mrvl_rss_hash_conf_get,
- .filter_ctrl = mrvl_eth_filter_ctrl,
+ .flow_ops_get = mrvl_eth_flow_ops_get,
.mtr_ops_get = mrvl_mtr_ops_get,
.tm_ops_get = mrvl_tm_ops_get,
};
* Mbuf offload flags.
*/
static inline uint64_t
-mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
+mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc, uint64_t packet_type)
{
- uint64_t flags;
+ uint64_t flags = 0;
enum pp2_inq_desc_status status;
- status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
- if (unlikely(status != PP2_DESC_ERR_OK))
- flags = PKT_RX_IP_CKSUM_BAD;
- else
- flags = PKT_RX_IP_CKSUM_GOOD;
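+ /* Report L3 checksum status only for IPv4 and L4 status only for
+ * TCP/UDP packets.
+ */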
+ if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
+ status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
+ if (unlikely(status != PP2_DESC_ERR_OK))
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+ else
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ }
- status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
- if (unlikely(status != PP2_DESC_ERR_OK))
- flags |= PKT_RX_L4_CKSUM_BAD;
- else
- flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (((packet_type & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) ||
+ ((packet_type & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP)) {
+ status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
+ if (unlikely(status != PP2_DESC_ERR_OK))
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+ else
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+ }
return flags;
}
-	/* drop packet in case of mac, overrun or resource error */
+	/* drop packet in case of mac, overrun or resource error,
+	 * unless forwarding of bad frames is enabled
+	 */
status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
- if (unlikely(status != PP2_DESC_ERR_OK)) {
+	if (unlikely(status != PP2_DESC_ERR_OK) &&
+	    !q->priv->forward_bad_frames) {
struct pp2_buff_inf binf = {
.addr = rte_mbuf_data_iova_default(mbuf),
.cookie = (uint64_t)mbuf,
mbuf->l3_len = l4_offset - l3_offset;
if (likely(q->cksum_enabled))
- mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
+ mbuf->ol_flags =
+ mrvl_desc_to_ol_flags(&descs[i],
+ mbuf->packet_type);
rx_pkts[rx_done++] = mbuf;
q->bytes_recv += mbuf->pkt_len;
if (unlikely(num <= q->priv->bpool_min_size ||
(!rx_done && num < q->priv->bpool_init_size))) {
- mrvl_fill_bpool(q, MRVL_BURST_SIZE);
+ mrvl_fill_bpool(q, q->priv->fill_bpool_buffs);
} else if (unlikely(num > q->priv->bpool_max_size)) {
int i;
int pkt_to_remove = num - q->priv->bpool_init_size;
* default value
*/
*l3_type = PP2_OUTQ_L3_TYPE_IPV4;
- *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
+ *gen_l3_cksum = ol_flags & RTE_MBUF_F_TX_IP_CKSUM ? 1 : 0;
- if (ol_flags & PKT_TX_IPV6) {
+ if (ol_flags & RTE_MBUF_F_TX_IPV6) {
*l3_type = PP2_OUTQ_L3_TYPE_IPV6;
/* no checksum for ipv6 header */
*gen_l3_cksum = 0;
}
- if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) {
+ if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) {
*l4_type = PP2_OUTQ_L4_TYPE_TCP;
*gen_l4_cksum = 1;
- } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) {
+ } else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM) {
*l4_type = PP2_OUTQ_L4_TYPE_UDP;
*gen_l4_cksum = 1;
} else {
return nb_pkts;
}
-/**
- * Initialize packet processor.
- *
- * @return
- * 0 on success, negative error value otherwise.
- */
-static int
-mrvl_init_pp2(void)
-{
- struct pp2_init_params init_params;
-
- memset(&init_params, 0, sizeof(init_params));
- init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
- init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
- init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
- if (mrvl_cfg && mrvl_cfg->pp2_cfg.prs_udfs.num_udfs)
- memcpy(&init_params.prs_udfs, &mrvl_cfg->pp2_cfg.prs_udfs,
- sizeof(struct pp2_parse_udfs));
- return pp2_init(&init_params);
-}
-
-/**
- * Deinitialize packet processor.
- *
- * @return
- * 0 on success, negative error value otherwise.
- */
-static void
-mrvl_deinit_pp2(void)
-{
- pp2_deinit();
-}
-
/**
* Create private device structure.
*
struct pp2_bpool_params bpool_params;
char match[MRVL_MATCH_LEN];
struct mrvl_priv *priv;
+ uint16_t max_frame_size;
int ret, bpool_bit;
priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
if (ret)
goto out_free_priv;
+ ret = pp2_ppio_get_l4_cksum_max_frame_size(priv->pp_id, priv->ppio_id,
+ &max_frame_size);
+ if (ret)
+ goto out_free_priv;
+
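+ /* Derive the maximum MTU from the HW L4 checksum frame size limit. */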
+ priv->max_mtu = max_frame_size + RTE_ETHER_CRC_LEN -
+ MRVL_PP2_ETH_HDRS_LEN;
+
bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
PP2_BPOOL_NUM_POOLS);
if (bpool_bit < 0)
eth_dev->dev_ops = &mrvl_ops;
eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
- eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
rte_eth_dev_probing_finish(eth_dev);
return 0;
return 0;
}
-/**
- * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
- */
-static void
-mrvl_deinit_hifs(void)
-{
- int i;
-
- for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
- if (hifs[i])
- pp2_hif_deinit(hifs[i]);
- }
- used_hifs = MRVL_MUSDK_HIFS_RESERVED;
- memset(hifs, 0, sizeof(hifs));
-}
-
/**
* DPDK callback to register the virtual device.
*
RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
-RTE_LOG_REGISTER(mrvl_logtype, pmd.net.mvpp2, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(mrvl_logtype, NOTICE);