 * Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Cavium, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>

#include "lio_23xx_vf.h"
#include "lio_ethdev.h"
/* Default RSS key in use */
static uint8_t lio_rss_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
static const struct rte_eth_desc_lim lio_rx_desc_lim = {
	.nb_max = CN23XX_MAX_OQ_DESCRIPTORS,
	.nb_min = CN23XX_MIN_OQ_DESCRIPTORS,
static const struct rte_eth_desc_lim lio_tx_desc_lim = {
	.nb_max = CN23XX_MAX_IQ_DESCRIPTORS,
	.nb_min = CN23XX_MIN_IQ_DESCRIPTORS,
/* Wait for the control command to reach the NIC. */
lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
		      struct lio_dev_ctrl_cmd *ctrl_cmd)
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;

	while ((ctrl_cmd->cond == 0) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
/** \brief Send Rx control command
 * @param eth_dev Pointer to the structure rte_eth_dev
 * @param start_stop whether to start or stop
 */
lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* Flush first to prevent cmd failure
	 * in case the queue is full.
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
	ctrl_pkt.ncmd.s.param1 = start_stop;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send RX Control message\n");

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "RX Control command timed out\n");
lio_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *devinfo)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	devinfo->max_rx_queues = lio_dev->max_rx_queues;
	devinfo->max_tx_queues = lio_dev->max_tx_queues;

	devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;

	devinfo->max_mac_addrs = 1;

	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				    DEV_RX_OFFLOAD_UDP_CKSUM |
				    DEV_RX_OFFLOAD_TCP_CKSUM);
	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
				    DEV_TX_OFFLOAD_UDP_CKSUM |
				    DEV_TX_OFFLOAD_TCP_CKSUM);

	devinfo->rx_desc_lim = lio_rx_desc_lim;
	devinfo->tx_desc_lim = lio_tx_desc_lim;

	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 |
					   ETH_RSS_NONFRAG_IPV4_TCP |
					   ETH_RSS_NONFRAG_IPV6_TCP |
					   ETH_RSS_IPV6_TCP_EX);
lio_dev_validate_vf_mtu(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't check MTU\n",

	/* Limit the MTU to make sure the Ethernet packets are between
	 * ETHER_MIN_MTU bytes and the PF's MTU.
	 */
	if ((new_mtu < ETHER_MIN_MTU) ||
	    (new_mtu > lio_dev->linfo.link.s.mtu)) {
		lio_dev_err(lio_dev, "Invalid MTU: %d\n", new_mtu);
		lio_dev_err(lio_dev, "Valid range %d and %d\n",
			    ETHER_MIN_MTU, lio_dev->linfo.link.s.mtu);
lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update reta\n",

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
			    "The size of the hash lookup table configured (%d) doesn't match the number the hardware can support (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);

	/* Flush first to prevent cmd failure
	 * in case the queue is full.
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;
	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
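	/* reta_conf is an array of 64-entry groups; copy only the entries
	 * whose bit is set in each group's mask into the local table.
	 */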
	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
				index = (i * RTE_RETA_GROUP_SIZE) + j;
				rss_state->itable[index] = reta_conf[i].reta[j];

	rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
	memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);
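	/* The RSS parameters are handed to the firmware as 64-bit words,
	 * so byte-swap them as required before queueing the control packet.
	 */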
	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
			    "The size of the hash lookup table configured (%d) doesn't match the number the hardware can support (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);

	num = reta_size / RTE_RETA_GROUP_SIZE;

	for (i = 0; i < num; i++) {
		memcpy(reta_conf->reta,
		       &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
		       RTE_RETA_GROUP_SIZE);
lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			  struct rte_eth_rss_conf *rss_conf)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	uint8_t *hash_key = NULL;

	if (rss_state->hash_disable) {
		lio_dev_info(lio_dev, "RSS disabled in nic\n");
		rss_conf->rss_hf = 0;

	hash_key = rss_conf->rss_key;
	if (hash_key != NULL)
		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);

		rss_hf |= ETH_RSS_IPV4;
	if (rss_state->tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
		rss_hf |= ETH_RSS_IPV6;
	if (rss_state->ipv6_tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (rss_state->ipv6_ex)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (rss_state->ipv6_tcp_ex_hash)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;

	rss_conf->rss_hf = rss_hf;
lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_conf *rss_conf)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update hash\n",

	/* Flush first to prevent cmd failure
	 * in case the queue is full.
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;

	if (rss_conf->rss_key) {
		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
		rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
		rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
		memcpy(rss_state->hash_key, rss_conf->rss_key,
		       rss_state->hash_key_size);
		memcpy(rss_param->key, rss_state->hash_key,
		       rss_state->hash_key_size);

	if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		/* Can't disable RSS through hash flags
		 * if it was enabled by default during init.
		 */
		if (!rss_state->hash_disable)

		/* This is for --disable-rss during testpmd launch */
		rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
		uint32_t hashinfo = 0;

		/* Can't enable RSS if it was disabled by default during init */
		if (rss_state->hash_disable)

		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
			hashinfo |= LIO_RSS_HASH_IPV4;

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
			rss_state->tcp_hash = 1;
			rss_state->tcp_hash = 0;

		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
			hashinfo |= LIO_RSS_HASH_IPV6;

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
			rss_state->ipv6_tcp_hash = 1;
			rss_state->ipv6_tcp_hash = 0;

		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
			hashinfo |= LIO_RSS_HASH_IPV6_EX;
			rss_state->ipv6_ex = 1;
			rss_state->ipv6_ex = 0;

		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
			rss_state->ipv6_tcp_ex_hash = 1;
			rss_state->ipv6_tcp_ex_hash = 0;

		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
		rss_param->param.hashinfo = hashinfo;

	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev,
				 struct rte_eth_link *link)
	struct rte_eth_link *dst = &eth_dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
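/* Branch-free population count (SWAR): each step folds adjacent bit groups
 * together, leaving the total number of set bits in the low byte. Used below
 * to count the queues enabled in the firmware's iq/oq masks.
 */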
lio_hweight64(uint64_t w)
	uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);

	res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	res = res + (res >> 8);
	res = res + (res >> 16);

	return (res + (res >> 32)) & 0x00000000000000FFul;
lio_dev_link_update(struct rte_eth_dev *eth_dev,
		    int wait_to_complete __rte_unused)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_eth_link link, old;

	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	memset(&old, 0, sizeof(old));

	/* Return what we found */
	if (lio_dev->linfo.link.s.link_up == 0) {
		/* Interface is down */
		if (lio_dev_atomic_write_link_status(eth_dev, &link))

		if (link.link_status == old.link_status)

	link.link_status = ETH_LINK_UP; /* Interface is up */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	switch (lio_dev->linfo.link.s.speed) {
	case LIO_LINK_SPEED_10000:
		link.link_speed = ETH_SPEED_NUM_10G;

		link.link_speed = ETH_SPEED_NUM_NONE;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (lio_dev_atomic_write_link_status(eth_dev, &link))

	if (link.link_status == old.link_status)
/** \brief Net device enable, disable allmulticast
 * @param eth_dev Pointer to the structure rte_eth_dev
 */
lio_change_dev_flag(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* Flush first to prevent cmd failure
	 * in case the queue is full.
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;

	/* Create a ctrl pkt command to be sent to core app. */
	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
	ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send change flag message\n");

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "Change dev flag command timed out\n");
lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",

	lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",

	lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	struct rte_eth_rss_conf rss_conf;

	/* Configure the RSS key and the RSS protocols used to compute
	 * the RSS hash of input packets.
	 */
	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);

	if (rss_conf.rss_key == NULL)
		rss_conf.rss_key = lio_rss_key; /* Default hash key */

	lio_dev_rss_hash_update(eth_dev, &rss_conf);

	memset(reta_conf, 0, sizeof(reta_conf));
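	/* Build the indirection table by spreading entries round-robin
	 * across the configured Rx queues, then push it to the firmware.
	 */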
	for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
		uint8_t q_idx, conf_idx, reta_idx;

		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
				  i % eth_dev->data->nb_rx_queues : 0);
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		reta_conf[conf_idx].reta[reta_idx] = q_idx;
		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);

	lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_conf rss_conf;

	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
		lio_dev_rss_configure(eth_dev);

	/* If mq_mode is none, disable RSS. */
		memset(&rss_conf, 0, sizeof(rss_conf));
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);
/**
 * Setup our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 * @param num_rx_descs
 *    Number of entries in the queue
 * @param socket_id
 *    Where to allocate memory
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_rx_descs, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf __rte_unused,
		       struct rte_mempool *mp)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t fw_mapped_oq;

	if (q_no >= lio_dev->nb_rx_queues) {
		lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);

	lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);

	fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;

	if ((lio_dev->droq[fw_mapped_oq]) &&
	    (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
			    "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->droq[fw_mapped_oq]->max_count);

	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
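	/* The usable data room per mbuf excludes the headroom reserved
	 * at the front of each buffer.
	 */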
	if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
		lio_dev_err(lio_dev, "droq allocation failed\n");

	eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];
/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param rxq
 *    Opaque pointer to the receive queue to release
 */
lio_dev_rx_queue_release(void *rxq)
	struct lio_droq *droq = rxq;
	struct lio_device *lio_dev = droq->lio_dev;

	/* Run time queue deletion not supported */
	if (lio_dev->port_configured)

		lio_delete_droq_queue(droq->lio_dev, oq_no);
/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *   Pointer to structure rte_eth_dev
 * @param num_tx_descs
 *   Number of ringbuffer descriptors
 * @param socket_id
 *   NUMA socket id, used for memory allocations
 * @param tx_conf
 *   Pointer to the structure rte_eth_txconf
 *
 * @return
 *   - On success, return 0
 *   - On failure, return -errno value
 */
lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_tx_descs, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;

	if (q_no >= lio_dev->nb_tx_queues) {
		lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);

	lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);

	if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
	    (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
			    "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->instr_queue[fw_mapped_iq]->max_count);

	retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
			      num_tx_descs, lio_dev, socket_id);

		lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");

	retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
				   lio_dev->instr_queue[fw_mapped_iq]->max_count,

		lio_delete_instruction_queue(lio_dev, fw_mapped_iq);

	eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];
/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param txq
 *    Opaque pointer to the transmit queue to release
 */
lio_dev_tx_queue_release(void *txq)
	struct lio_instr_queue *tq = txq;
	struct lio_device *lio_dev = tq->lio_dev;
	uint32_t fw_mapped_iq_no;

	/* Run time queue deletion not supported */
	if (lio_dev->port_configured)

		lio_delete_sglist(tq);

		fw_mapped_iq_no = tq->txpciq.s.q_no;
		lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
/**
 * API to check link state.
 */
lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct lio_link_status_resp *resp;
	union octeon_link_status *ls;
	struct lio_soft_command *sc;

	if (!lio_dev->intf_open)

	resp_size = sizeof(struct lio_link_status_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);

	resp = (struct lio_link_status_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_INFO, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
		goto get_status_fail;
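	/* Poll the instruction queue until the firmware overwrites the
	 * completion word or the timeout expires.
	 */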
	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);

		goto get_status_fail;

	ls = &resp->link_info.link;

	lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);

	if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
		lio_dev->linfo.link.link_status64 = ls->link_status64;
		lio_dev_link_update(eth_dev, 0);

	lio_free_soft_command(sc);

get_status_fail:
	lio_free_soft_command(sc);
/* This function will be invoked every LSC_TIMEOUT ns (100ms)
 * and will update the link state if it changes.
 */
lio_sync_link_state_check(void *eth_dev)
	struct lio_device *lio_dev =
		(((struct rte_eth_dev *)eth_dev)->data->dev_private);

	if (lio_dev->port_configured)
		lio_dev_get_link_status(eth_dev);

	/* Schedule the periodic link status check.
	 * Stop the check when the interface is closed and start it again
	 * when the interface is opened.
	 */
	if (lio_dev->intf_open)
		rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
lio_dev_start(struct rte_eth_dev *eth_dev)
	uint16_t mtu = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;

	lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);

	if (lio_dev->fn_list.enable_io_queues(lio_dev))

	if (lio_send_rx_ctrl_cmd(eth_dev, 1))

	/* Ready for link status updates */
	lio_dev->intf_open = 1;

	/* Configure RSS if device configured with multiple RX queues. */
	lio_dev_mq_rx_configure(eth_dev);

	/* Start polling for LSC */
	ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
				lio_sync_link_state_check,
			    "link state check handler creation failed\n");
		goto dev_lsc_handle_error;

	while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))

	if (lio_dev->linfo.link.link_status64 == 0) {
		goto dev_mtu_check_error;

	if (lio_dev->linfo.link.s.mtu != mtu) {
		ret = lio_dev_validate_vf_mtu(eth_dev, mtu);
			goto dev_mtu_check_error;

dev_mtu_check_error:
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

dev_lsc_handle_error:
	lio_dev->intf_open = 0;
	lio_send_rx_ctrl_cmd(eth_dev, 0);
lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, start the port first\n");

	if (lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already UP\n");

	if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
		lio_dev_err(lio_dev, "Unable to set Link UP\n");

	lio_dev->linfo.link.s.link_up = 1;
	eth_dev->data->dev_link.link_status = ETH_LINK_UP;
lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, start the port first\n");

	if (!lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already DOWN\n");

	lio_dev->linfo.link.s.link_up = 0;
	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
		lio_dev->linfo.link.s.link_up = 1;
		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
		lio_dev_err(lio_dev, "Unable to set Link Down\n");
static int lio_dev_configure(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int retval, num_iqueues, num_oqueues;
	uint8_t mac[ETHER_ADDR_LEN], i;
	struct lio_if_cfg_resp *resp;
	struct lio_soft_command *sc;
	union lio_if_cfg if_cfg;

	PMD_INIT_FUNC_TRACE();

	/* Re-configuring the firmware is not supported: the number of
	 * tx/rx queues per port can't be changed from its initial value.
	 */
	if (lio_dev->port_configured) {
		if ((lio_dev->nb_rx_queues != eth_dev->data->nb_rx_queues) ||
		    (lio_dev->nb_tx_queues != eth_dev->data->nb_tx_queues)) {
			lio_dev_err(lio_dev,
				    "rxq/txq re-conf not supported. Restart application with new value.\n");

	lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
	lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;

	resp_size = sizeof(struct lio_if_cfg_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);

	resp = (struct lio_if_cfg_resp *)sc->virtrptr;
	/* The firmware doesn't have the capability to reconfigure the queues;
	 * claim all queues and use as many as required.
	 */
	if_cfg.if_cfg64 = 0;
	if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
	if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
	if_cfg.s.base_queue = 0;

	if_cfg.s.gmx_port_id = lio_dev->pf_num;

	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_IF_CFG, 0,
				 if_cfg.if_cfg64, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
		/* Soft instr is freed by driver in case of failure. */
		goto nic_config_fail;
	/* Sleep on a wait queue till the cond flag indicates that the
	 * response has arrived or the command has timed out.
	 */
	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);

	retval = resp->status;
		lio_dev_err(lio_dev, "iq/oq config failed\n");
		goto nic_config_fail;

	lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
			 sizeof(struct octeon_if_cfg_info) >> 3);

	num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
	num_oqueues = lio_hweight64(resp->cfg_info.oqmask);

	if (!(num_iqueues) || !(num_oqueues)) {
		lio_dev_err(lio_dev,
			    "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
			    (unsigned long)resp->cfg_info.iqmask,
			    (unsigned long)resp->cfg_info.oqmask);
		goto nic_config_fail;

	lio_dev_dbg(lio_dev,
		    "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
		    eth_dev->data->port_id,
		    (unsigned long)resp->cfg_info.iqmask,
		    (unsigned long)resp->cfg_info.oqmask,
		    num_iqueues, num_oqueues);
	lio_dev->linfo.num_rxpciq = num_oqueues;
	lio_dev->linfo.num_txpciq = num_iqueues;

	for (i = 0; i < num_oqueues; i++) {
		lio_dev->linfo.rxpciq[i].rxpciq64 =
			resp->cfg_info.linfo.rxpciq[i].rxpciq64;
		lio_dev_dbg(lio_dev, "index %d OQ %d\n",
			    i, lio_dev->linfo.rxpciq[i].s.q_no);

	for (i = 0; i < num_iqueues; i++) {
		lio_dev->linfo.txpciq[i].txpciq64 =
			resp->cfg_info.linfo.txpciq[i].txpciq64;
		lio_dev_dbg(lio_dev, "index %d IQ %d\n",
			    i, lio_dev->linfo.txpciq[i].s.q_no);

	lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio_dev->linfo.link.link_status64 =
		resp->cfg_info.linfo.link.link_status64;

	/* 64-bit swap required on LE machines */
	lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)mac, &eth_dev->data->mac_addrs[0]);

	lio_dev->glist_lock =
		rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
	if (lio_dev->glist_lock == NULL)

	lio_dev->glist_head =
		rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
	if (lio_dev->glist_head == NULL) {
		rte_free(lio_dev->glist_lock);
		lio_dev->glist_lock = NULL;

	lio_dev_link_update(eth_dev, 0);

	lio_dev->port_configured = 1;

	lio_free_soft_command(sc);
	/* Disable iq_0 for reconf */
	lio_dev->fn_list.disable_io_queues(lio_dev);

	/* Reset ioq regs */
	lio_dev->fn_list.setup_device_regs(lio_dev);

	/* Free iq_0 used during init */
	lio_free_instr_queue0(lio_dev);

nic_config_fail:
	lio_dev_err(lio_dev, "Failed retval %d\n", retval);
	lio_free_soft_command(sc);
	lio_free_instr_queue0(lio_dev);
/* Define our Ethernet device operations */
static const struct eth_dev_ops liovf_eth_dev_ops = {
	.dev_configure = lio_dev_configure,
	.dev_start = lio_dev_start,
	.dev_set_link_up = lio_dev_set_link_up,
	.dev_set_link_down = lio_dev_set_link_down,
	.allmulticast_enable = lio_dev_allmulticast_enable,
	.allmulticast_disable = lio_dev_allmulticast_disable,
	.link_update = lio_dev_link_update,
	.dev_infos_get = lio_dev_info_get,
	.rx_queue_setup = lio_dev_rx_queue_setup,
	.rx_queue_release = lio_dev_rx_queue_release,
	.tx_queue_setup = lio_dev_tx_queue_setup,
	.tx_queue_release = lio_dev_tx_queue_release,
	.reta_update = lio_dev_rss_reta_update,
	.reta_query = lio_dev_rss_reta_query,
	.rss_hash_conf_get = lio_dev_rss_hash_conf_get,
	.rss_hash_update = lio_dev_rss_hash_update,
};
lio_check_pf_hs_response(void *lio_dev)
	struct lio_device *dev = lio_dev;

	/* Check till the response arrives */
	if (dev->pfvf_hsword.coproc_tics_per_us)

	cn23xx_vf_handle_mbox(dev);

	rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
/**
 * \brief Identify the LIO device and map the BAR address space
 * @param lio_dev lio device
 */
lio_chip_specific_setup(struct lio_device *lio_dev)
	struct rte_pci_device *pdev = lio_dev->pci_dev;
	uint32_t dev_id = pdev->id.device_id;

	case LIO_CN23XX_VF_VID:
		lio_dev->chip_id = LIO_CN23XX_VF_VID;
		ret = cn23xx_vf_setup_device(lio_dev);

		lio_dev_err(lio_dev, "Unsupported Chip\n");

		lio_dev_info(lio_dev, "DEVICE : %s\n", s);
lio_first_time_init(struct lio_device *lio_dev,
		    struct rte_pci_device *pdev)
	PMD_INIT_FUNC_TRACE();

	/* Set the DPDK-specific PCI device pointer */
	lio_dev->pci_dev = pdev;

	/* Identify the LIO type and set device ops */
	if (lio_chip_specific_setup(lio_dev)) {
		lio_dev_err(lio_dev, "Chip specific setup failed\n");

	/* Initialize soft command buffer pool */
	if (lio_setup_sc_buffer_pool(lio_dev)) {
		lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");

	/* Initialize lists to manage the requests of different types that
	 * arrive from applications for this lio device.
	 */
	lio_setup_response_list(lio_dev);

	if (lio_dev->fn_list.setup_mbox(lio_dev)) {
		lio_dev_err(lio_dev, "Mailbox setup failed\n");

	/* Check PF response */
	lio_check_pf_hs_response((void *)lio_dev);

	/* Do handshake and exit if incompatible PF driver */
	if (cn23xx_pfvf_handshake(lio_dev))

	cn23xx_vf_ask_pf_to_do_flr(lio_dev);
	/* Wait for FLR for 100ms per SRIOV specification */

	if (cn23xx_vf_set_io_queues_off(lio_dev)) {
		lio_dev_err(lio_dev, "Setting io queues off failed\n");

	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to configure device registers\n");

	if (lio_setup_instr_queue0(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");

	dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;

	lio_dev->max_tx_queues = dpdk_queues;
	lio_dev->max_rx_queues = dpdk_queues;

	/* Enable input and output queues for this device */
	if (lio_dev->fn_list.enable_io_queues(lio_dev))

	lio_free_sc_buffer_pool(lio_dev);
	if (lio_dev->mbox[0])
		lio_dev->fn_list.free_mbox(lio_dev);
	if (lio_dev->instr_queue[0])
		lio_free_instr_queue0(lio_dev);
lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	/* Free the soft command buffer pool */
	lio_free_sc_buffer_pool(lio_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
lio_eth_dev_init(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
	eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;

	/* Primary process does the initialization. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	rte_eth_copy_pci_info(eth_dev, pdev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	if (pdev->mem_resource[0].addr) {
		lio_dev->hw_addr = pdev->mem_resource[0].addr;
		PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");

	lio_dev->eth_dev = eth_dev;
	/* Set the lio device print string */
	snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
		 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	lio_dev->port_id = eth_dev->data->port_id;

	if (lio_first_time_init(lio_dev, pdev)) {
		lio_dev_err(lio_dev, "Device init failed\n");

	eth_dev->dev_ops = &liovf_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		lio_dev_err(lio_dev,
			    "MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		eth_dev->rx_pkt_burst = NULL;
		eth_dev->tx_pkt_burst = NULL;

	rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);

	lio_dev->port_configured = 0;
	/* Always allow unicast packets */
	lio_dev->ifflags |= LIO_IFFLAG_UNICAST;
/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_liovf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
	{ .vendor_id = 0, /* sentinel */ }
};
static struct eth_driver rte_liovf_pmd = {
	.pci_drv = {
		.id_table = pci_id_liovf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = lio_eth_dev_init,
	.eth_dev_uninit = lio_eth_dev_uninit,
	.dev_private_size = sizeof(struct lio_device),
};

RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio");