1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
5 #include <rte_ethdev_driver.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_cycles.h>
8 #include <rte_malloc.h>
10 #include <rte_ether.h>
13 #include "lio_23xx_vf.h"
14 #include "lio_ethdev.h"
18 int lio_logtype_driver;
20 /* Default RSS key in use */
21 static uint8_t lio_rss_key[40] = {
22 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
23 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
24 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
25 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
26 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
29 static const struct rte_eth_desc_lim lio_rx_desc_lim = {
30 .nb_max = CN23XX_MAX_OQ_DESCRIPTORS,
31 .nb_min = CN23XX_MIN_OQ_DESCRIPTORS,
35 static const struct rte_eth_desc_lim lio_tx_desc_lim = {
36 .nb_max = CN23XX_MAX_IQ_DESCRIPTORS,
37 .nb_min = CN23XX_MIN_IQ_DESCRIPTORS,
41 /* Wait for control command to reach the NIC. */
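/* Implementation note: this is a busy-wait, not a sleep. Each pass
 * flushes instruction queue 0 so that completed commands have their
 * completion handling run; the control command's completion path sets
 * ctrl_cmd->cond, which ends the loop. If LIO_MAX_CMD_TIMEOUT passes
 * elapse first, the command is treated as timed out.
 */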
43 lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
44 struct lio_dev_ctrl_cmd *ctrl_cmd)
46 uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
48 while ((ctrl_cmd->cond == 0) && --timeout) {
49 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
57 * \brief Send Rx control command
58 * @param eth_dev Pointer to the structure rte_eth_dev
59 * @param start_stop whether to start or stop
62 lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
64 struct lio_device *lio_dev = LIO_DEV(eth_dev);
65 struct lio_dev_ctrl_cmd ctrl_cmd;
66 struct lio_ctrl_pkt ctrl_pkt;
68 /* flush added to prevent cmd failure
69 * in case the queue is full
71 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
73 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
74 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
76 ctrl_cmd.eth_dev = eth_dev;
79 ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
80 ctrl_pkt.ncmd.s.param1 = start_stop;
81 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
83 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
84 lio_dev_err(lio_dev, "Failed to send RX Control message\n");
88 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
89 lio_dev_err(lio_dev, "RX Control command timed out\n");
96 /* stores each statistic's name and its offset within the stats structure */
97 struct rte_lio_xstats_name_off {
98 char name[RTE_ETH_XSTATS_NAME_SIZE];
102 static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
103 {"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
104 {"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
105 {"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
106 {"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
107 {"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
108 {"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
109 {"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
110 {"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
111 {"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
112 {"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
113 {"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
114 {"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
115 {"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
116 {"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
117 sizeof(struct octeon_rx_stats)},
118 {"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
119 sizeof(struct octeon_rx_stats)},
120 {"tx_broadcast_pkts",
121 (offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
122 sizeof(struct octeon_rx_stats)},
123 {"tx_multicast_pkts",
124 (offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
125 sizeof(struct octeon_rx_stats)},
126 {"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
127 sizeof(struct octeon_rx_stats)},
128 {"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
129 sizeof(struct octeon_rx_stats)},
130 {"tx_total_collisions", (offsetof(struct octeon_tx_stats,
132 sizeof(struct octeon_rx_stats)},
133 {"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
134 sizeof(struct octeon_rx_stats)},
135 {"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
136 sizeof(struct octeon_rx_stats)},
139 #define LIO_NB_XSTATS RTE_DIM(rte_lio_stats_strings)
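/* Layout note: firmware returns a single struct octeon_link_stats in
 * which the Rx block (struct octeon_rx_stats) is immediately followed
 * by the Tx block (struct octeon_tx_stats). That is why every tx_*
 * entry above adds sizeof(struct octeon_rx_stats) to its offsetof();
 * one base pointer into the response can then be indexed with
 * rte_lio_stats_strings[i].offset for both halves, as
 * lio_dev_xstats_get() does below.
 */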
141 /* Get hw stats of the port */
143 lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
146 struct lio_device *lio_dev = LIO_DEV(eth_dev);
147 uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
148 struct octeon_link_stats *hw_stats;
149 struct lio_link_stats_resp *resp;
150 struct lio_soft_command *sc;
155 if (!lio_dev->intf_open) {
156 lio_dev_err(lio_dev, "Port %d down\n",
161 if (n < LIO_NB_XSTATS)
162 return LIO_NB_XSTATS;
164 resp_size = sizeof(struct lio_link_stats_resp);
165 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
169 resp = (struct lio_link_stats_resp *)sc->virtrptr;
170 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
171 LIO_OPCODE_PORT_STATS, 0, 0, 0);
173 /* Setting wait time in seconds */
174 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
176 retval = lio_send_soft_command(lio_dev, sc);
177 if (retval == LIO_IQ_SEND_FAILED) {
178 lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
183 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
184 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
185 lio_process_ordered_list(lio_dev);
189 retval = resp->status;
191 lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
195 lio_swap_8B_data((uint64_t *)(&resp->link_stats),
196 sizeof(struct octeon_link_stats) >> 3);
198 hw_stats = &resp->link_stats;
200 for (i = 0; i < LIO_NB_XSTATS; i++) {
203 *(uint64_t *)(((char *)hw_stats) +
204 rte_lio_stats_strings[i].offset);
207 lio_free_soft_command(sc);
209 return LIO_NB_XSTATS;
212 lio_free_soft_command(sc);
218 lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
219 struct rte_eth_xstat_name *xstats_names,
220 unsigned limit __rte_unused)
222 struct lio_device *lio_dev = LIO_DEV(eth_dev);
225 if (!lio_dev->intf_open) {
226 lio_dev_err(lio_dev, "Port %d down\n",
231 if (xstats_names == NULL)
232 return LIO_NB_XSTATS;
234 /* Note: limit checked in rte_eth_xstats_get_names() */
236 for (i = 0; i < LIO_NB_XSTATS; i++) {
237 snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
238 "%s", rte_lio_stats_strings[i].name);
241 return LIO_NB_XSTATS;
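/* Usage sketch (hypothetical application code, not part of the PMD):
 * the usual two-call xstats pattern queries the count with a NULL
 * array first, then fills the names:
 *
 *	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *	rte_eth_xstats_get_names(port_id, names, n);
 *
 * For this driver both calls return LIO_NB_XSTATS.
 */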
244 /* Reset hw stats for the port */
246 lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
248 struct lio_device *lio_dev = LIO_DEV(eth_dev);
249 struct lio_dev_ctrl_cmd ctrl_cmd;
250 struct lio_ctrl_pkt ctrl_pkt;
252 if (!lio_dev->intf_open) {
253 lio_dev_err(lio_dev, "Port %d down\n",
258 /* flush added to prevent cmd failure
259 * in case the queue is full
261 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
263 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
264 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
266 ctrl_cmd.eth_dev = eth_dev;
269 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
270 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
272 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
273 lio_dev_err(lio_dev, "Failed to send clear stats command\n");
277 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
278 lio_dev_err(lio_dev, "Clear stats command timed out\n");
282 /* clear stored per queue stats */
283 RTE_FUNC_PTR_OR_RET(*eth_dev->dev_ops->stats_reset);
284 (*eth_dev->dev_ops->stats_reset)(eth_dev);
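/* LIO_CMD_CLEAR_STATS only clears the firmware-side counters; the
 * per-queue counters kept in host memory (and reported by
 * lio_dev_stats_get() below) are zeroed separately by the stats_reset
 * callback invoked above.
 */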
287 /* Retrieve the device statistics (# packets in/out, # bytes in/out, etc.) */
289 lio_dev_stats_get(struct rte_eth_dev *eth_dev,
290 struct rte_eth_stats *stats)
292 struct lio_device *lio_dev = LIO_DEV(eth_dev);
293 struct lio_droq_stats *oq_stats;
294 struct lio_iq_stats *iq_stats;
295 struct lio_instr_queue *txq;
296 struct lio_droq *droq;
302 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
303 iq_no = lio_dev->linfo.txpciq[i].s.q_no;
304 txq = lio_dev->instr_queue[iq_no];
306 iq_stats = &txq->stats;
307 pkts += iq_stats->tx_done;
308 drop += iq_stats->tx_dropped;
309 bytes += iq_stats->tx_tot_bytes;
313 stats->opackets = pkts;
314 stats->obytes = bytes;
315 stats->oerrors = drop;
321 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
322 oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
323 droq = lio_dev->droq[oq_no];
325 oq_stats = &droq->stats;
326 pkts += oq_stats->rx_pkts_received;
327 drop += (oq_stats->rx_dropped +
328 oq_stats->dropped_toomany +
329 oq_stats->dropped_nomem);
330 bytes += oq_stats->rx_bytes_received;
333 stats->ibytes = bytes;
334 stats->ipackets = pkts;
335 stats->ierrors = drop;
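/* Usage sketch (hypothetical application code): these aggregated
 * counters surface through the generic ethdev API:
 *
 *	struct rte_eth_stats st;
 *	int rc = rte_eth_stats_get(port_id, &st);
 *
 * On success st.ipackets, st.opackets etc. hold the totals computed
 * here.
 */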
341 lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
343 struct lio_device *lio_dev = LIO_DEV(eth_dev);
344 struct lio_droq_stats *oq_stats;
345 struct lio_iq_stats *iq_stats;
346 struct lio_instr_queue *txq;
347 struct lio_droq *droq;
350 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
351 iq_no = lio_dev->linfo.txpciq[i].s.q_no;
352 txq = lio_dev->instr_queue[iq_no];
354 iq_stats = &txq->stats;
355 memset(iq_stats, 0, sizeof(struct lio_iq_stats));
359 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
360 oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
361 droq = lio_dev->droq[oq_no];
363 oq_stats = &droq->stats;
364 memset(oq_stats, 0, sizeof(struct lio_droq_stats));
370 lio_dev_info_get(struct rte_eth_dev *eth_dev,
371 struct rte_eth_dev_info *devinfo)
373 struct lio_device *lio_dev = LIO_DEV(eth_dev);
374 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
376 devinfo->pci_dev = pci_dev;
378 switch (pci_dev->id.subsystem_device_id) {
379 /* CN23xx 10G cards */
380 case PCI_SUBSYS_DEV_ID_CN2350_210:
381 case PCI_SUBSYS_DEV_ID_CN2360_210:
382 case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3:
383 case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
384 case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
385 case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
386 devinfo->speed_capa = ETH_LINK_SPEED_10G;
388 /* CN23xx 25G cards */
389 case PCI_SUBSYS_DEV_ID_CN2350_225:
390 case PCI_SUBSYS_DEV_ID_CN2360_225:
391 devinfo->speed_capa = ETH_LINK_SPEED_25G;
394 devinfo->speed_capa = ETH_LINK_SPEED_10G;
396 "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
399 devinfo->max_rx_queues = lio_dev->max_rx_queues;
400 devinfo->max_tx_queues = lio_dev->max_tx_queues;
402 devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
403 devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;
405 devinfo->max_mac_addrs = 1;
407 devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
408 DEV_RX_OFFLOAD_UDP_CKSUM |
409 DEV_RX_OFFLOAD_TCP_CKSUM |
410 DEV_RX_OFFLOAD_VLAN_STRIP);
411 devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
412 DEV_TX_OFFLOAD_UDP_CKSUM |
413 DEV_TX_OFFLOAD_TCP_CKSUM |
414 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
416 devinfo->rx_desc_lim = lio_rx_desc_lim;
417 devinfo->tx_desc_lim = lio_tx_desc_lim;
419 devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
420 devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
421 devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 |
422 ETH_RSS_NONFRAG_IPV4_TCP |
424 ETH_RSS_NONFRAG_IPV6_TCP |
426 ETH_RSS_IPV6_TCP_EX);
430 lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
432 struct lio_device *lio_dev = LIO_DEV(eth_dev);
433 uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
434 uint32_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
435 struct lio_dev_ctrl_cmd ctrl_cmd;
436 struct lio_ctrl_pkt ctrl_pkt;
438 PMD_INIT_FUNC_TRACE();
440 if (!lio_dev->intf_open) {
441 lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
446 /* Check that the VF MTU is within the allowed range;
447 * the new value must not exceed the PF MTU.
449 if ((mtu < ETHER_MIN_MTU) || (mtu > pf_mtu)) {
450 lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
451 ETHER_MIN_MTU, pf_mtu);
455 /* flush added to prevent cmd failure
456 * in case the queue is full
458 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
460 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
461 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
463 ctrl_cmd.eth_dev = eth_dev;
466 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
467 ctrl_pkt.ncmd.s.param1 = mtu;
468 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
470 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
471 lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
475 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
476 lio_dev_err(lio_dev, "Command to change MTU timed out\n");
480 if (frame_len > ETHER_MAX_LEN)
481 eth_dev->data->dev_conf.rxmode.offloads |=
482 DEV_RX_OFFLOAD_JUMBO_FRAME;
484 eth_dev->data->dev_conf.rxmode.offloads &=
485 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
487 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
488 eth_dev->data->mtu = mtu;
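/* Worked example of the frame-length math above: frame_len is
 * mtu + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4), so the default MTU of
 * 1500 yields exactly ETHER_MAX_LEN (1518) and jumbo mode stays off,
 * while any larger MTU pushes frame_len past 1518 and turns
 * DEV_RX_OFFLOAD_JUMBO_FRAME on.
 */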
494 lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
495 struct rte_eth_rss_reta_entry64 *reta_conf,
498 struct lio_device *lio_dev = LIO_DEV(eth_dev);
499 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
500 struct lio_rss_set *rss_param;
501 struct lio_dev_ctrl_cmd ctrl_cmd;
502 struct lio_ctrl_pkt ctrl_pkt;
505 if (!lio_dev->intf_open) {
506 lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
511 if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
513 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
514 reta_size, LIO_RSS_MAX_TABLE_SZ);
518 /* flush added to prevent cmd failure
519 * in case the queue is full
521 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
523 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
524 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
526 rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
528 ctrl_cmd.eth_dev = eth_dev;
531 ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
532 ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
533 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
535 rss_param->param.flags = 0xF;
536 rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
537 rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
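/* Each rte_eth_rss_reta_entry64 describes RTE_RETA_GROUP_SIZE (64)
 * indirection-table slots; bit j of reta_conf[i].mask marks slot
 * i * RTE_RETA_GROUP_SIZE + j as valid. The loop below copies only the
 * marked slots into the driver's shadow table before it is sent to
 * firmware.
 */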
539 for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
540 for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
541 if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
542 index = (i * RTE_RETA_GROUP_SIZE) + j;
543 rss_state->itable[index] = reta_conf[i].reta[j];
548 rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
549 memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);
551 lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
553 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
554 lio_dev_err(lio_dev, "Failed to set rss hash\n");
558 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
559 lio_dev_err(lio_dev, "Set rss hash timed out\n");
567 lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
568 struct rte_eth_rss_reta_entry64 *reta_conf,
571 struct lio_device *lio_dev = LIO_DEV(eth_dev);
572 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
575 if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
577 "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
578 reta_size, LIO_RSS_MAX_TABLE_SZ);
582 num = reta_size / RTE_RETA_GROUP_SIZE;
584 for (i = 0; i < num; i++) {
585 memcpy(reta_conf->reta,
586 &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
587 RTE_RETA_GROUP_SIZE);
595 lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
596 struct rte_eth_rss_conf *rss_conf)
598 struct lio_device *lio_dev = LIO_DEV(eth_dev);
599 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
600 uint8_t *hash_key = NULL;
603 if (rss_state->hash_disable) {
604 lio_dev_info(lio_dev, "RSS disabled in NIC\n");
605 rss_conf->rss_hf = 0;
610 hash_key = rss_conf->rss_key;
611 if (hash_key != NULL)
612 memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
615 rss_hf |= ETH_RSS_IPV4;
616 if (rss_state->tcp_hash)
617 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
619 rss_hf |= ETH_RSS_IPV6;
620 if (rss_state->ipv6_tcp_hash)
621 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
622 if (rss_state->ipv6_ex)
623 rss_hf |= ETH_RSS_IPV6_EX;
624 if (rss_state->ipv6_tcp_ex_hash)
625 rss_hf |= ETH_RSS_IPV6_TCP_EX;
627 rss_conf->rss_hf = rss_hf;
633 lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
634 struct rte_eth_rss_conf *rss_conf)
636 struct lio_device *lio_dev = LIO_DEV(eth_dev);
637 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
638 struct lio_rss_set *rss_param;
639 struct lio_dev_ctrl_cmd ctrl_cmd;
640 struct lio_ctrl_pkt ctrl_pkt;
642 if (!lio_dev->intf_open) {
643 lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
648 /* flush added to prevent cmd failure
649 * in case the queue is full
651 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
653 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
654 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
656 rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
658 ctrl_cmd.eth_dev = eth_dev;
661 ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
662 ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
663 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
665 rss_param->param.flags = 0xF;
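/* 0xF sets all of the LIO_RSS_PARAM_*_UNCHANGED bits; each branch
 * below then clears the bit for the field it actually rewrites (hash
 * key, hash info), so firmware applies only the touched fields.
 */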
667 if (rss_conf->rss_key) {
668 rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
669 rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
670 rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
671 memcpy(rss_state->hash_key, rss_conf->rss_key,
672 rss_state->hash_key_size);
673 memcpy(rss_param->key, rss_state->hash_key,
674 rss_state->hash_key_size);
677 if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
678 /* Can't disable RSS through hash flags
679 * if it was enabled by default during init
681 if (!rss_state->hash_disable)
684 /* This is for --disable-rss during testpmd launch */
685 rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
687 uint32_t hashinfo = 0;
689 /* Can't enable RSS if it was disabled by default during init */
690 if (rss_state->hash_disable)
693 if (rss_conf->rss_hf & ETH_RSS_IPV4) {
694 hashinfo |= LIO_RSS_HASH_IPV4;
700 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
701 hashinfo |= LIO_RSS_HASH_TCP_IPV4;
702 rss_state->tcp_hash = 1;
704 rss_state->tcp_hash = 0;
707 if (rss_conf->rss_hf & ETH_RSS_IPV6) {
708 hashinfo |= LIO_RSS_HASH_IPV6;
714 if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
715 hashinfo |= LIO_RSS_HASH_TCP_IPV6;
716 rss_state->ipv6_tcp_hash = 1;
718 rss_state->ipv6_tcp_hash = 0;
721 if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
722 hashinfo |= LIO_RSS_HASH_IPV6_EX;
723 rss_state->ipv6_ex = 1;
725 rss_state->ipv6_ex = 0;
728 if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
729 hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
730 rss_state->ipv6_tcp_ex_hash = 1;
732 rss_state->ipv6_tcp_ex_hash = 0;
735 rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
736 rss_param->param.hashinfo = hashinfo;
739 lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
741 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
742 lio_dev_err(lio_dev, "Failed to set rss hash\n");
746 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
747 lio_dev_err(lio_dev, "Set rss hash timed out\n");
755 * Add a VXLAN destination UDP port for an interface.
758 * Pointer to the structure rte_eth_dev
763 * On success return 0
764 * On failure return -1
767 lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
768 struct rte_eth_udp_tunnel *udp_tnl)
770 struct lio_device *lio_dev = LIO_DEV(eth_dev);
771 struct lio_dev_ctrl_cmd ctrl_cmd;
772 struct lio_ctrl_pkt ctrl_pkt;
777 if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
778 lio_dev_err(lio_dev, "Unsupported tunnel type\n");
782 /* flush added to prevent cmd failure
783 * in case the queue is full
785 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
787 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
788 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
790 ctrl_cmd.eth_dev = eth_dev;
793 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
794 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
795 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
796 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
798 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
799 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
803 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
804 lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
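/* Usage sketch (hypothetical application code): this callback is
 * reached through the generic tunnel-port API:
 *
 *	struct rte_eth_udp_tunnel tnl = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
 */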
812 * Remove a VXLAN destination UDP port for an interface.
815 * Pointer to the structure rte_eth_dev
820 * On success return 0
821 * On failure return -1
824 lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
825 struct rte_eth_udp_tunnel *udp_tnl)
827 struct lio_device *lio_dev = LIO_DEV(eth_dev);
828 struct lio_dev_ctrl_cmd ctrl_cmd;
829 struct lio_ctrl_pkt ctrl_pkt;
834 if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
835 lio_dev_err(lio_dev, "Unsupported tunnel type\n");
839 /* flush added to prevent cmd failure
840 * in case the queue is full
842 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
844 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
845 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
847 ctrl_cmd.eth_dev = eth_dev;
850 ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
851 ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
852 ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
853 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
855 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
856 lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
860 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
861 lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
869 lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
871 struct lio_device *lio_dev = LIO_DEV(eth_dev);
872 struct lio_dev_ctrl_cmd ctrl_cmd;
873 struct lio_ctrl_pkt ctrl_pkt;
875 if (lio_dev->linfo.vlan_is_admin_assigned)
878 /* flush added to prevent cmd failure
879 * in case the queue is full
881 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
883 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
884 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
886 ctrl_cmd.eth_dev = eth_dev;
889 ctrl_pkt.ncmd.s.cmd = on ?
890 LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
891 ctrl_pkt.ncmd.s.param1 = vlan_id;
892 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
894 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
895 lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
896 on ? "add" : "remove");
900 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
901 lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
902 on ? "add" : "remove");
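/* SWAR (SIMD within a register) population count: each step sums bit
 * counts in progressively wider fields, first pairs of bits, then
 * nibbles, then bytes, and the final shift-and-add chain folds the
 * eight byte counts into the low byte of the result. It is used below
 * to count the queue bits set in the firmware's iq/oq masks.
 */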
910 lio_hweight64(uint64_t w)
912 uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);
915 (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
916 res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
917 res = res + (res >> 8);
918 res = res + (res >> 16);
920 return (res + (res >> 32)) & 0x00000000000000FFul;
924 lio_dev_link_update(struct rte_eth_dev *eth_dev,
925 int wait_to_complete __rte_unused)
927 struct lio_device *lio_dev = LIO_DEV(eth_dev);
928 struct rte_eth_link link;
931 memset(&link, 0, sizeof(link));
932 link.link_status = ETH_LINK_DOWN;
933 link.link_speed = ETH_SPEED_NUM_NONE;
934 link.link_duplex = ETH_LINK_HALF_DUPLEX;
935 link.link_autoneg = ETH_LINK_AUTONEG;
937 /* Return what we found */
938 if (lio_dev->linfo.link.s.link_up == 0) {
939 /* Interface is down */
940 return rte_eth_linkstatus_set(eth_dev, &link);
943 link.link_status = ETH_LINK_UP; /* Interface is up */
944 link.link_duplex = ETH_LINK_FULL_DUPLEX;
945 switch (lio_dev->linfo.link.s.speed) {
946 case LIO_LINK_SPEED_10000:
947 link.link_speed = ETH_SPEED_NUM_10G;
949 case LIO_LINK_SPEED_25000:
950 link.link_speed = ETH_SPEED_NUM_25G;
953 link.link_speed = ETH_SPEED_NUM_NONE;
954 link.link_duplex = ETH_LINK_HALF_DUPLEX;
957 return rte_eth_linkstatus_set(eth_dev, &link);
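/* rte_eth_linkstatus_set() stores the new link state atomically and,
 * in this DPDK version, returns 0 when the status actually changed and
 * -1 when it was already current; that return value is simply passed
 * through to the link_update caller.
 */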
961 * \brief Send updated net device flags (promiscuous, allmulticast) to firmware
962 * @param eth_dev Pointer to the structure rte_eth_dev
965 lio_change_dev_flag(struct rte_eth_dev *eth_dev)
967 struct lio_device *lio_dev = LIO_DEV(eth_dev);
968 struct lio_dev_ctrl_cmd ctrl_cmd;
969 struct lio_ctrl_pkt ctrl_pkt;
971 /* flush added to prevent cmd failure
972 * in case the queue is full
974 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
976 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
977 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
979 ctrl_cmd.eth_dev = eth_dev;
982 /* Create a ctrl pkt command to be sent to core app. */
983 ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
984 ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
985 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
987 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
988 lio_dev_err(lio_dev, "Failed to send change flag message\n");
992 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
993 lio_dev_err(lio_dev, "Change dev flag command timed out\n");
997 lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
999 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1001 if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
1002 lio_dev_err(lio_dev, "Require firmware version >= %s\n",
1003 LIO_VF_TRUST_MIN_VERSION);
1007 if (!lio_dev->intf_open) {
1008 lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
1013 lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
1014 lio_change_dev_flag(eth_dev);
1018 lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
1020 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1022 if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
1023 lio_dev_err(lio_dev, "Require firmware version >= %s\n",
1024 LIO_VF_TRUST_MIN_VERSION);
1028 if (!lio_dev->intf_open) {
1029 lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
1034 lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
1035 lio_change_dev_flag(eth_dev);
1039 lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
1041 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1043 if (!lio_dev->intf_open) {
1044 lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
1049 lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
1050 lio_change_dev_flag(eth_dev);
1054 lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
1056 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1058 if (!lio_dev->intf_open) {
1059 lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
1064 lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
1065 lio_change_dev_flag(eth_dev);
1069 lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
1071 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1072 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
1073 struct rte_eth_rss_reta_entry64 reta_conf[8];
1074 struct rte_eth_rss_conf rss_conf;
1077 /* Configure the RSS key and the RSS protocols used to compute
1078 * the RSS hash of input packets.
1080 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
1081 if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
1082 rss_state->hash_disable = 1;
1083 lio_dev_rss_hash_update(eth_dev, &rss_conf);
1087 if (rss_conf.rss_key == NULL)
1088 rss_conf.rss_key = lio_rss_key; /* Default hash key */
1090 lio_dev_rss_hash_update(eth_dev, &rss_conf);
1092 memset(reta_conf, 0, sizeof(reta_conf));
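/* Build the default indirection table: the LIO_RSS_MAX_TABLE_SZ slots
 * are spread round-robin across the configured Rx queues. conf_idx
 * picks the 64-entry rte_eth_rss_reta_entry64 group, reta_idx the slot
 * within it, and the mask bit marks the slot valid for
 * lio_dev_rss_reta_update().
 */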
1093 for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
1094 uint8_t q_idx, conf_idx, reta_idx;
1096 q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
1097 i % eth_dev->data->nb_rx_queues : 0);
1098 conf_idx = i / RTE_RETA_GROUP_SIZE;
1099 reta_idx = i % RTE_RETA_GROUP_SIZE;
1100 reta_conf[conf_idx].reta[reta_idx] = q_idx;
1101 reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
1104 lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
1108 lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
1110 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1111 struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
1112 struct rte_eth_rss_conf rss_conf;
1114 switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
1116 lio_dev_rss_configure(eth_dev);
1118 case ETH_MQ_RX_NONE:
1119 /* if mq_mode is none, disable rss mode. */
1121 memset(&rss_conf, 0, sizeof(rss_conf));
1122 rss_state->hash_disable = 1;
1123 lio_dev_rss_hash_update(eth_dev, &rss_conf);
1128 * Setup our receive queue/ringbuffer. This is the
1129 * queue the Octeon uses to send us packets and
1130 * responses. We are given a memory pool for our
1131 * packet buffers that are used to populate the receive
1135 * Pointer to the structure rte_eth_dev
1138 * @param num_rx_descs
1139 * Number of entries in the queue
1141 * Where to allocate memory
1143 * Pointer to the structure rte_eth_rxconf
1145 * Pointer to the packet pool
1148 * - On success, return 0
1149 * - On failure, return -1
1152 lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
1153 uint16_t num_rx_descs, unsigned int socket_id,
1154 const struct rte_eth_rxconf *rx_conf __rte_unused,
1155 struct rte_mempool *mp)
1157 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1158 struct rte_pktmbuf_pool_private *mbp_priv;
1159 uint32_t fw_mapped_oq;
1162 if (q_no >= lio_dev->nb_rx_queues) {
1163 lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
1167 lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);
1169 fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
1171 /* Free previous allocation if any */
1172 if (eth_dev->data->rx_queues[q_no] != NULL) {
1173 lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
1174 eth_dev->data->rx_queues[q_no] = NULL;
1177 mbp_priv = rte_mempool_get_priv(mp);
1178 buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1180 if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
1182 lio_dev_err(lio_dev, "droq allocation failed\n");
1186 eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];
1192 * Release the receive queue/ringbuffer. Called by
1196 * Opaque pointer to the receive queue to release
1202 lio_dev_rx_queue_release(void *rxq)
1204 struct lio_droq *droq = rxq;
1209 lio_delete_droq_queue(droq->lio_dev, oq_no);
1214 * Allocate and initialize SW ring. Initialize associated HW registers.
1217 * Pointer to structure rte_eth_dev
1222 * @param num_tx_descs
1223 * Number of ringbuffer descriptors
1226 * NUMA socket id, used for memory allocations
1229 * Pointer to the structure rte_eth_txconf
1232 * - On success, return 0
1233 * - On failure, return -errno value
1236 lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
1237 uint16_t num_tx_descs, unsigned int socket_id,
1238 const struct rte_eth_txconf *tx_conf __rte_unused)
1240 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1241 int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
1244 if (q_no >= lio_dev->nb_tx_queues) {
1245 lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
1249 lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
1251 /* Free previous allocation if any */
1252 if (eth_dev->data->tx_queues[q_no] != NULL) {
1253 lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
1254 eth_dev->data->tx_queues[q_no] = NULL;
1257 retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
1258 num_tx_descs, lio_dev, socket_id);
1261 lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
1265 retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
1266 lio_dev->instr_queue[fw_mapped_iq]->nb_desc,
1270 lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
1274 eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];
1280 * Release the transmit queue/ringbuffer. Called by
1284 * Opaque pointer to the transmit queue to release
1290 lio_dev_tx_queue_release(void *txq)
1292 struct lio_instr_queue *tq = txq;
1293 uint32_t fw_mapped_iq_no;
1298 lio_delete_sglist(tq);
1300 fw_mapped_iq_no = tq->txpciq.s.q_no;
1301 lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
1306 * API to check link status.
1309 lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
1311 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1312 uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
1313 struct lio_link_status_resp *resp;
1314 union octeon_link_status *ls;
1315 struct lio_soft_command *sc;
1318 if (!lio_dev->intf_open)
1321 resp_size = sizeof(struct lio_link_status_resp);
1322 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
1326 resp = (struct lio_link_status_resp *)sc->virtrptr;
1327 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
1328 LIO_OPCODE_INFO, 0, 0, 0);
1330 /* Setting wait time in seconds */
1331 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
1333 if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
1334 goto get_status_fail;
1336 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
1337 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
1342 goto get_status_fail;
1344 ls = &resp->link_info.link;
1346 lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);
1348 if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
1349 if (ls->s.mtu < eth_dev->data->mtu) {
1350 lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
1352 eth_dev->data->mtu = ls->s.mtu;
1354 lio_dev->linfo.link.link_status64 = ls->link_status64;
1355 lio_dev_link_update(eth_dev, 0);
1358 lio_free_soft_command(sc);
1363 lio_free_soft_command(sc);
1366 /* This function will be invoked every LIO_LSC_TIMEOUT us (100 ms)
1367 * and will update the link state if it changes.
1370 lio_sync_link_state_check(void *eth_dev)
1372 struct lio_device *lio_dev =
1373 (((struct rte_eth_dev *)eth_dev)->data->dev_private);
1375 if (lio_dev->port_configured)
1376 lio_dev_get_link_status(eth_dev);
1378 /* Schedule periodic link status check.
1379 * Stop the check when the interface is closed and start it again when it is opened.
1381 if (lio_dev->intf_open)
1382 rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
1387 lio_dev_start(struct rte_eth_dev *eth_dev)
1390 uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
1391 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1392 uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
1395 lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);
1397 if (lio_dev->fn_list.enable_io_queues(lio_dev))
1400 if (lio_send_rx_ctrl_cmd(eth_dev, 1))
1403 /* Ready for link status updates */
1404 lio_dev->intf_open = 1;
1407 /* Configure RSS if the device is configured with multiple Rx queues. */
1408 lio_dev_mq_rx_configure(eth_dev);
1410 /* start polling for lsc */
1411 ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
1412 lio_sync_link_state_check,
1415 lio_dev_err(lio_dev,
1416 "link state check handler creation failed\n");
1417 goto dev_lsc_handle_error;
1420 while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
1423 if (lio_dev->linfo.link.link_status64 == 0) {
1425 goto dev_mtu_set_error;
1428 mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
1429 if (mtu < ETHER_MIN_MTU)
1430 mtu = ETHER_MIN_MTU;
1432 if (eth_dev->data->mtu != mtu) {
1433 ret = lio_dev_mtu_set(eth_dev, mtu);
1435 goto dev_mtu_set_error;
1441 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
1443 dev_lsc_handle_error:
1444 lio_dev->intf_open = 0;
1445 lio_send_rx_ctrl_cmd(eth_dev, 0);
1450 /* Stop device and disable input/output functions */
1452 lio_dev_stop(struct rte_eth_dev *eth_dev)
1454 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1456 lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
1457 lio_dev->intf_open = 0;
1460 /* Cancel callback if still running. */
1461 rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
1463 lio_send_rx_ctrl_cmd(eth_dev, 0);
1465 lio_wait_for_instr_fetch(lio_dev);
1467 /* Clear recorded link status */
1468 lio_dev->linfo.link.link_status64 = 0;
1472 lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
1474 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1476 if (!lio_dev->intf_open) {
1477 lio_dev_info(lio_dev, "Port is stopped; start the port first\n");
1481 if (lio_dev->linfo.link.s.link_up) {
1482 lio_dev_info(lio_dev, "Link is already UP\n");
1486 if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
1487 lio_dev_err(lio_dev, "Unable to set Link UP\n");
1491 lio_dev->linfo.link.s.link_up = 1;
1492 eth_dev->data->dev_link.link_status = ETH_LINK_UP;
1498 lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
1500 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1502 if (!lio_dev->intf_open) {
1503 lio_dev_info(lio_dev, "Port is stopped; start the port first\n");
1507 if (!lio_dev->linfo.link.s.link_up) {
1508 lio_dev_info(lio_dev, "Link is already DOWN\n");
1512 lio_dev->linfo.link.s.link_up = 0;
1513 eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
1515 if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
1516 lio_dev->linfo.link.s.link_up = 1;
1517 eth_dev->data->dev_link.link_status = ETH_LINK_UP;
1518 lio_dev_err(lio_dev, "Unable to set Link Down\n");
1526 * Reset and stop the device. This occurs on the first
1527 * call to this routine. Subsequent calls will simply
1528 * return. NB: This will require the NIC to be rebooted.
1531 * Pointer to the structure rte_eth_dev
1537 lio_dev_close(struct rte_eth_dev *eth_dev)
1539 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1541 lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
1543 if (lio_dev->intf_open)
1544 lio_dev_stop(eth_dev);
1546 /* Reset ioq regs */
1547 lio_dev->fn_list.setup_device_regs(lio_dev);
1549 if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) {
1550 cn23xx_vf_ask_pf_to_do_flr(lio_dev);
1551 rte_delay_ms(LIO_PCI_FLR_WAIT);
1555 lio_dev->fn_list.free_mbox(lio_dev);
1557 /* Free glist resources */
1558 rte_free(lio_dev->glist_head);
1559 rte_free(lio_dev->glist_lock);
1560 lio_dev->glist_head = NULL;
1561 lio_dev->glist_lock = NULL;
1563 lio_dev->port_configured = 0;
1565 /* Delete all queues */
1566 lio_dev_clear_queues(eth_dev);
1570 * Enable tunnel rx checksum verification from firmware.
1573 lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
1575 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1576 struct lio_dev_ctrl_cmd ctrl_cmd;
1577 struct lio_ctrl_pkt ctrl_pkt;
1579 /* flush added to prevent cmd failure
1580 * in case the queue is full
1582 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
1584 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
1585 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
1587 ctrl_cmd.eth_dev = eth_dev;
1590 ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
1591 ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
1592 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
1594 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
1595 lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
1599 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
1600 lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
1604 * Enable checksum calculation for inner packet in a tunnel.
1607 lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
1609 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1610 struct lio_dev_ctrl_cmd ctrl_cmd;
1611 struct lio_ctrl_pkt ctrl_pkt;
1613 /* flush added to prevent cmd failure
1614 * in case the queue is full
1616 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
1618 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
1619 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
1621 ctrl_cmd.eth_dev = eth_dev;
1624 ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
1625 ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
1626 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
1628 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
1629 lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
1633 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
1634 lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
1638 lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,
1641 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1642 struct lio_dev_ctrl_cmd ctrl_cmd;
1643 struct lio_ctrl_pkt ctrl_pkt;
1645 if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {
1646 lio_dev_err(lio_dev, "Require firmware version >= %s\n",
1647 LIO_Q_RECONF_MIN_VERSION);
1651 /* flush added to prevent cmd failure
1652 * in case the queue is full
1654 lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
1656 memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
1657 memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
1659 ctrl_cmd.eth_dev = eth_dev;
1662 ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;
1663 ctrl_pkt.ncmd.s.param1 = num_txq;
1664 ctrl_pkt.ncmd.s.param2 = num_rxq;
1665 ctrl_pkt.ctrl_cmd = &ctrl_cmd;
1667 if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
1668 lio_dev_err(lio_dev, "Failed to send queue count control command\n");
1672 if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
1673 lio_dev_err(lio_dev, "Queue count control command timed out\n");
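/* Reconfigure the queue set: inform firmware of the new Tx/Rx queue
 * counts via lio_send_queue_count_update() above, stop the port if it
 * is currently open, and reset the IOQ registers so the queues can be
 * set up afresh.
 */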
1681 lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)
1683 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1685 if (lio_dev->nb_rx_queues != num_rxq ||
1686 lio_dev->nb_tx_queues != num_txq) {
1687 if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))
1689 lio_dev->nb_rx_queues = num_rxq;
1690 lio_dev->nb_tx_queues = num_txq;
1693 if (lio_dev->intf_open)
1694 lio_dev_stop(eth_dev);
1696 /* Reset ioq registers */
1697 if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
1698 lio_dev_err(lio_dev, "Failed to configure device registers\n");
1706 lio_dev_configure(struct rte_eth_dev *eth_dev)
1708 struct lio_device *lio_dev = LIO_DEV(eth_dev);
1709 uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
1710 int retval, num_iqueues, num_oqueues;
1711 uint8_t mac[ETHER_ADDR_LEN], i;
1712 struct lio_if_cfg_resp *resp;
1713 struct lio_soft_command *sc;
1714 union lio_if_cfg if_cfg;
1717 PMD_INIT_FUNC_TRACE();
1719 /* Inform firmware about change in number of queues to use.
1720 * Disable IO queues and reset registers for re-configuration.
1722 if (lio_dev->port_configured)
1723 return lio_reconf_queues(eth_dev,
1724 eth_dev->data->nb_tx_queues,
1725 eth_dev->data->nb_rx_queues);
1727 lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
1728 lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;
1730 /* Set max number of queues which can be re-configured. */
1731 lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;
1732 lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;
1734 resp_size = sizeof(struct lio_if_cfg_resp);
1735 sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
1739 resp = (struct lio_if_cfg_resp *)sc->virtrptr;
1741 /* Firmware doesn't have the capability to reconfigure the queues.
1742 * Claim all queues, and use as many as required.
1744 if_cfg.if_cfg64 = 0;
1745 if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
1746 if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
1747 if_cfg.s.base_queue = 0;
1749 if_cfg.s.gmx_port_id = lio_dev->pf_num;
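/* union lio_if_cfg packs the requested iq/oq counts, the base queue
 * and the gmx port id into a single 64-bit word (if_cfg64), which is
 * passed as a parameter of the LIO_OPCODE_IF_CFG soft command below.
 */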
1751 lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
1752 LIO_OPCODE_IF_CFG, 0,
1753 if_cfg.if_cfg64, 0);
1755 /* Setting wait time in seconds */
1756 sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
1758 retval = lio_send_soft_command(lio_dev, sc);
1759 if (retval == LIO_IQ_SEND_FAILED) {
1760 lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
1762 /* Soft instr is freed by driver in case of failure. */
1763 goto nic_config_fail;
1766 /* Sleep on a wait queue till the cond flag indicates that the
1767 * response arrived or the request timed out.
1769 while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
1770 lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
1771 lio_process_ordered_list(lio_dev);
1775 retval = resp->status;
1777 lio_dev_err(lio_dev, "iq/oq config failed\n");
1778 goto nic_config_fail;
1781 snprintf(lio_dev->firmware_version, LIO_FW_VERSION_LENGTH, "%s",
1782 resp->cfg_info.lio_firmware_version);
1784 lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
1785 sizeof(struct octeon_if_cfg_info) >> 3);
1787 num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
1788 num_oqueues = lio_hweight64(resp->cfg_info.oqmask);
1790 if (!(num_iqueues) || !(num_oqueues)) {
1791 lio_dev_err(lio_dev,
1792 "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
1793 (unsigned long)resp->cfg_info.iqmask,
1794 (unsigned long)resp->cfg_info.oqmask);
1795 goto nic_config_fail;
1798 lio_dev_dbg(lio_dev,
1799 "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
1800 eth_dev->data->port_id,
1801 (unsigned long)resp->cfg_info.iqmask,
1802 (unsigned long)resp->cfg_info.oqmask,
1803 num_iqueues, num_oqueues);
1805 lio_dev->linfo.num_rxpciq = num_oqueues;
1806 lio_dev->linfo.num_txpciq = num_iqueues;
1808 for (i = 0; i < num_oqueues; i++) {
1809 lio_dev->linfo.rxpciq[i].rxpciq64 =
1810 resp->cfg_info.linfo.rxpciq[i].rxpciq64;
1811 lio_dev_dbg(lio_dev, "index %d OQ %d\n",
1812 i, lio_dev->linfo.rxpciq[i].s.q_no);
1815 for (i = 0; i < num_iqueues; i++) {
1816 lio_dev->linfo.txpciq[i].txpciq64 =
1817 resp->cfg_info.linfo.txpciq[i].txpciq64;
1818 lio_dev_dbg(lio_dev, "index %d IQ %d\n",
1819 i, lio_dev->linfo.txpciq[i].s.q_no);
1822 lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
1823 lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
1824 lio_dev->linfo.link.link_status64 =
1825 resp->cfg_info.linfo.link.link_status64;
1827 /* 64-bit swap required on LE machines */
1828 lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
1829 for (i = 0; i < ETHER_ADDR_LEN; i++)
1830 mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
1833 /* Copy the permanent MAC address */
1834 ether_addr_copy((struct ether_addr *)mac, &eth_dev->data->mac_addrs[0]);
1836 /* enable firmware checksum support for tunnel packets */
1837 lio_enable_hw_tunnel_rx_checksum(eth_dev);
1838 lio_enable_hw_tunnel_tx_checksum(eth_dev);
1840 lio_dev->glist_lock =
1841 rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
1842 if (lio_dev->glist_lock == NULL)
1845 lio_dev->glist_head =
1846 rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
1848 if (lio_dev->glist_head == NULL) {
1849 rte_free(lio_dev->glist_lock);
1850 lio_dev->glist_lock = NULL;
1854 lio_dev_link_update(eth_dev, 0);
1856 lio_dev->port_configured = 1;
1858 lio_free_soft_command(sc);
1860 /* Reset ioq regs */
1861 lio_dev->fn_list.setup_device_regs(lio_dev);
1863 /* Free iq_0 used during init */
1864 lio_free_instr_queue0(lio_dev);
1869 lio_dev_err(lio_dev, "Failed retval %d\n", retval);
1870 lio_free_soft_command(sc);
1871 lio_free_instr_queue0(lio_dev);
1876 /* Ethernet device operations for the LIO VF PMD */
1877 static const struct eth_dev_ops liovf_eth_dev_ops = {
1878 .dev_configure = lio_dev_configure,
1879 .dev_start = lio_dev_start,
1880 .dev_stop = lio_dev_stop,
1881 .dev_set_link_up = lio_dev_set_link_up,
1882 .dev_set_link_down = lio_dev_set_link_down,
1883 .dev_close = lio_dev_close,
1884 .promiscuous_enable = lio_dev_promiscuous_enable,
1885 .promiscuous_disable = lio_dev_promiscuous_disable,
1886 .allmulticast_enable = lio_dev_allmulticast_enable,
1887 .allmulticast_disable = lio_dev_allmulticast_disable,
1888 .link_update = lio_dev_link_update,
1889 .stats_get = lio_dev_stats_get,
1890 .xstats_get = lio_dev_xstats_get,
1891 .xstats_get_names = lio_dev_xstats_get_names,
1892 .stats_reset = lio_dev_stats_reset,
1893 .xstats_reset = lio_dev_xstats_reset,
1894 .dev_infos_get = lio_dev_info_get,
1895 .vlan_filter_set = lio_dev_vlan_filter_set,
1896 .rx_queue_setup = lio_dev_rx_queue_setup,
1897 .rx_queue_release = lio_dev_rx_queue_release,
1898 .tx_queue_setup = lio_dev_tx_queue_setup,
1899 .tx_queue_release = lio_dev_tx_queue_release,
1900 .reta_update = lio_dev_rss_reta_update,
1901 .reta_query = lio_dev_rss_reta_query,
1902 .rss_hash_conf_get = lio_dev_rss_hash_conf_get,
1903 .rss_hash_update = lio_dev_rss_hash_update,
1904 .udp_tunnel_port_add = lio_dev_udp_tunnel_add,
1905 .udp_tunnel_port_del = lio_dev_udp_tunnel_del,
1906 .mtu_set = lio_dev_mtu_set,
1910 lio_check_pf_hs_response(void *lio_dev)
1912 struct lio_device *dev = lio_dev;
1914 /* check till response arrives */
1915 if (dev->pfvf_hsword.coproc_tics_per_us)
1918 cn23xx_vf_handle_mbox(dev);
1920 rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
1924 * \brief Identify the LIO device and map the BAR address space
1925 * @param lio_dev lio device
1928 lio_chip_specific_setup(struct lio_device *lio_dev)
1930 struct rte_pci_device *pdev = lio_dev->pci_dev;
1931 uint32_t dev_id = pdev->id.device_id;
1936 case LIO_CN23XX_VF_VID:
1937 lio_dev->chip_id = LIO_CN23XX_VF_VID;
1938 ret = cn23xx_vf_setup_device(lio_dev);
1943 lio_dev_err(lio_dev, "Unsupported Chip\n");
1947 lio_dev_info(lio_dev, "DEVICE : %s\n", s);
1953 lio_first_time_init(struct lio_device *lio_dev,
1954 struct rte_pci_device *pdev)
1958 PMD_INIT_FUNC_TRACE();
1960 /* set DPDK-specific PCI device pointer */
1961 lio_dev->pci_dev = pdev;
1963 /* Identify the LIO type and set device ops */
1964 if (lio_chip_specific_setup(lio_dev)) {
1965 lio_dev_err(lio_dev, "Chip specific setup failed\n");
1969 /* Initialize soft command buffer pool */
1970 if (lio_setup_sc_buffer_pool(lio_dev)) {
1971 lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
1975 /* Initialize lists to manage the requests of different types that
1976 * arrive from applications for this lio device.
1978 lio_setup_response_list(lio_dev);
1980 if (lio_dev->fn_list.setup_mbox(lio_dev)) {
1981 lio_dev_err(lio_dev, "Mailbox setup failed\n");
1985 /* Check PF response */
1986 lio_check_pf_hs_response((void *)lio_dev);
1988 /* Do the handshake and exit if the PF driver is incompatible */
1989 if (cn23xx_pfvf_handshake(lio_dev))
1992 /* Request and wait for device reset. */
1993 if (pdev->kdrv == RTE_KDRV_IGB_UIO) {
1994 cn23xx_vf_ask_pf_to_do_flr(lio_dev);
1995 /* FLR wait time doubled as a precaution. */
1996 rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
1999 if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
2000 lio_dev_err(lio_dev, "Failed to configure device registers\n");
2004 if (lio_setup_instr_queue0(lio_dev)) {
2005 lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
2009 dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;
2011 lio_dev->max_tx_queues = dpdk_queues;
2012 lio_dev->max_rx_queues = dpdk_queues;
2014 /* Enable input and output queues for this device */
2015 if (lio_dev->fn_list.enable_io_queues(lio_dev))
2021 lio_free_sc_buffer_pool(lio_dev);
2022 if (lio_dev->mbox[0])
2023 lio_dev->fn_list.free_mbox(lio_dev);
2024 if (lio_dev->instr_queue[0])
2025 lio_free_instr_queue0(lio_dev);
2031 lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
2033 struct lio_device *lio_dev = LIO_DEV(eth_dev);
2035 PMD_INIT_FUNC_TRACE();
2037 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2040 /* Free the soft command buffer pool */
2041 lio_free_sc_buffer_pool(lio_dev);
2043 rte_free(eth_dev->data->mac_addrs);
2044 eth_dev->data->mac_addrs = NULL;
2046 eth_dev->dev_ops = NULL;
2047 eth_dev->rx_pkt_burst = NULL;
2048 eth_dev->tx_pkt_burst = NULL;
2054 lio_eth_dev_init(struct rte_eth_dev *eth_dev)
2056 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
2057 struct lio_device *lio_dev = LIO_DEV(eth_dev);
2059 PMD_INIT_FUNC_TRACE();
2061 eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
2062 eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;
2064 /* Primary does the initialization. */
2065 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2068 rte_eth_copy_pci_info(eth_dev, pdev);
2070 if (pdev->mem_resource[0].addr) {
2071 lio_dev->hw_addr = pdev->mem_resource[0].addr;
2073 PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
2077 lio_dev->eth_dev = eth_dev;
2078 /* set lio device print string */
2079 snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
2080 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
2081 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2083 lio_dev->port_id = eth_dev->data->port_id;
2085 if (lio_first_time_init(lio_dev, pdev)) {
2086 lio_dev_err(lio_dev, "Device init failed\n");
2090 eth_dev->dev_ops = &liovf_eth_dev_ops;
2091 eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
2092 if (eth_dev->data->mac_addrs == NULL) {
2093 lio_dev_err(lio_dev,
2094 "MAC addresses memory allocation failed\n");
2095 eth_dev->dev_ops = NULL;
2096 eth_dev->rx_pkt_burst = NULL;
2097 eth_dev->tx_pkt_burst = NULL;
2101 rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
2104 lio_dev->port_configured = 0;
2105 /* Always allow unicast packets */
2106 lio_dev->ifflags |= LIO_IFFLAG_UNICAST;
2112 lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2113 struct rte_pci_device *pci_dev)
2115 struct rte_eth_dev *eth_dev;
2118 eth_dev = rte_eth_dev_pci_allocate(pci_dev,
2119 sizeof(struct lio_device));
2120 if (eth_dev == NULL)
2123 ret = lio_eth_dev_init(eth_dev);
2125 rte_eth_dev_pci_release(eth_dev);
2131 lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2133 return rte_eth_dev_pci_generic_remove(pci_dev,
2134 lio_eth_dev_uninit);
2137 /* Set of PCI devices this driver supports */
2138 static const struct rte_pci_id pci_id_liovf_map[] = {
2139 { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
2140 { .vendor_id = 0, /* sentinel */ }
2143 static struct rte_pci_driver rte_liovf_pmd = {
2144 .id_table = pci_id_liovf_map,
2145 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
2146 .probe = lio_eth_dev_pci_probe,
2147 .remove = lio_eth_dev_pci_remove,
2150 RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
2151 RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
2152 RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
2154 RTE_INIT(lio_init_log);
2158 lio_logtype_init = rte_log_register("pmd.net.liquidio.init");
2159 if (lio_logtype_init >= 0)
2160 rte_log_set_level(lio_logtype_init, RTE_LOG_NOTICE);
2161 lio_logtype_driver = rte_log_register("pmd.net.liquidio.driver");
2162 if (lio_logtype_driver >= 0)
2163 rte_log_set_level(lio_logtype_driver, RTE_LOG_NOTICE);